diff --git a/.changes/1.34.106.json b/.changes/1.34.106.json new file mode 100644 index 0000000000..12704461fb --- /dev/null +++ b/.changes/1.34.106.json @@ -0,0 +1,32 @@ +[ + { + "category": "``bedrock-agent-runtime``", + "description": "Updating Bedrock Knowledge Base Metadata & Filters feature with two new filters listContains and stringContains", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "CodeBuild Reserved Capacity VPC Support", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "Task executions now display a CANCELLING status when an execution is in the process of being cancelled.", + "type": "api-change" + }, + { + "category": "``grafana``", + "description": "This release adds new ServiceAccount and ServiceAccountToken APIs.", + "type": "api-change" + }, + { + "category": "``medical-imaging``", + "description": "Added support for importing medical imaging data from Amazon S3 buckets across accounts and regions.", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "Documentation-only update for AWS Security Hub", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.107.json b/.changes/1.34.107.json new file mode 100644 index 0000000000..383d39b3bb --- /dev/null +++ b/.changes/1.34.107.json @@ -0,0 +1,42 @@ +[ + { + "category": "``acm-pca``", + "description": "This release adds support for waiters to fail on AccessDeniedException when having insufficient permissions", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Adding Contact Flow metrics to the GetMetricDataV2 API", + "type": "api-change" + }, + { + "category": "``kafka``", + "description": "AWS MSK support for Broker Removal.", + "type": "api-change" + }, + { + "category": "``mwaa``", + "description": "Amazon MWAA now supports Airflow web server auto scaling to automatically handle increased demand from REST APIs, Command Line Interface (CLI), or 
more Airflow User Interface (UI) users. Customers can specify maximum and minimum web server instances during environment creation and update workflow.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "This release adds DescribeKeyRegistration and UpdateKeyRegistration APIs to manage QuickSight Customer Managed Keys (CMK).", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Introduced WorkerAccessConfiguration to SageMaker Workteam. This allows customers to configure resource access for workers in a workteam.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Documentation updates for AWS Secrets Manager", + "type": "api-change" + }, + { + "category": "retries", + "description": "Fix backoff calculation for truncated binary exponential backoff (`#3178 `__)", + "type": "bugfix" + } +] \ No newline at end of file diff --git a/.changes/1.34.108.json b/.changes/1.34.108.json new file mode 100644 index 0000000000..25b8a8ec9f --- /dev/null +++ b/.changes/1.34.108.json @@ -0,0 +1,27 @@ +[ + { + "category": "``application-autoscaling``", + "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "Aws CodeBuild now supports 36 hours build timeout", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "This release adds dualstack-without-public-ipv4 IP address type for ALB.", + "type": "api-change" + }, + { + "category": "``lakeformation``", + "description": "Introduces a new API, GetDataLakePrincipal, that returns the identity of the invoking principal", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "Enable use of CloudFormation traits in Smithy model to improve generated CloudFormation schema from the Smithy API model.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.109.json 
b/.changes/1.34.109.json new file mode 100644 index 0000000000..447147c7cf --- /dev/null +++ b/.changes/1.34.109.json @@ -0,0 +1,32 @@ +[ + { + "category": "``bedrock-agent``", + "description": "This release adds support for using Guardrails with Bedrock Agents.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "This release adds support for using Guardrails with Bedrock Agents.", + "type": "api-change" + }, + { + "category": "``controltower``", + "description": "Added ListControlOperations API and filtering support for ListEnabledControls API. Updates also includes added metadata for enabled controls and control operations.", + "type": "api-change" + }, + { + "category": "``osis``", + "description": "Add support for creating an OpenSearch Ingestion pipeline that is attached to a provided VPC. Add information about the destinations of an OpenSearch Ingestion pipeline to the GetPipeline and ListPipelines APIs.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "This release adds support for EngineLifecycleSupport on DBInstances, DBClusters, and GlobalClusters.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.110.json b/.changes/1.34.110.json new file mode 100644 index 0000000000..b0ae3518b3 --- /dev/null +++ b/.changes/1.34.110.json @@ -0,0 +1,37 @@ +[ + { + "category": "``cloudfront``", + "description": "Model update; no change to SDK functionality.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add Maintenance window to CreateJob and UpdateJob APIs and JobRun response. 
Add a new Job Run State for EXPIRED.", + "type": "api-change" + }, + { + "category": "``lightsail``", + "description": "This release adds support for Amazon Lightsail instances to switch between dual-stack or IPv4 only and IPv6-only public IP address types.", + "type": "api-change" + }, + { + "category": "``mailmanager``", + "description": "This release includes a new Amazon SES feature called Mail Manager, which is a set of email gateway capabilities designed to help customers strengthen their organization's email infrastructure, simplify email workflow management, and streamline email compliance control.", + "type": "api-change" + }, + { + "category": "``pi``", + "description": "Performance Insights added a new input parameter called AuthorizedActions to support the fine-grained access feature. Performance Insights also restricted the acceptable input characters.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for Db2 license through AWS Marketplace.", + "type": "api-change" + }, + { + "category": "``storagegateway``", + "description": "Added new SMBSecurityStrategy enum named MandatoryEncryptionNoAes128, new mode enforces encryption and disables AES 128-bit algorithums.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.111.json b/.changes/1.34.111.json new file mode 100644 index 0000000000..cf003dd004 --- /dev/null +++ b/.changes/1.34.111.json @@ -0,0 +1,27 @@ +[ + { + "category": "``chatbot``", + "description": "This change adds support for tagging Chatbot configurations.", + "type": "api-change" + }, + { + "category": "``cloudformation``", + "description": "Added DeletionMode FORCE_DELETE_STACK for deleting a stack that is stuck in DELETE_FAILED state due to resource deletion failure.", + "type": "api-change" + }, + { + "category": "``kms``", + "description": "This release includes feature to import customer's asymmetric (RSA, ECC and SM2) and HMAC keys into KMS 
in China.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds support for enabling or disabling a data source configured as part of Zero-ETL integration with Amazon S3, by setting its status.", + "type": "api-change" + }, + { + "category": "``wafv2``", + "description": "You can now use Security Lake to collect web ACL traffic data.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.112.json b/.changes/1.34.112.json new file mode 100644 index 0000000000..bc271e8796 --- /dev/null +++ b/.changes/1.34.112.json @@ -0,0 +1,12 @@ +[ + { + "category": "``emr-serverless``", + "description": "This release adds the capability to run interactive workloads using Apache Livy Endpoint.", + "type": "api-change" + }, + { + "category": "``opsworks``", + "description": "Documentation-only update for OpsWorks Stacks.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.113.json b/.changes/1.34.113.json new file mode 100644 index 0000000000..cfa389e1f4 --- /dev/null +++ b/.changes/1.34.113.json @@ -0,0 +1,17 @@ +[ + { + "category": "``dynamodb``", + "description": "Documentation only updates for DynamoDB.", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "AWS IoT FleetWise now supports listing vehicles with attributes filter, ListVehicles API is updated to support additional attributes filter.", + "type": "api-change" + }, + { + "category": "``managedblockchain``", + "description": "This is a minor documentation update to address the impact of the shut down of the Goerli and Polygon networks.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.114.json b/.changes/1.34.114.json new file mode 100644 index 0000000000..bd7cd32b2c --- /dev/null +++ b/.changes/1.34.114.json @@ -0,0 +1,22 @@ +[ + { + "category": "``dynamodb``", + "description": "Doc-only update for DynamoDB. 
Specified the IAM actions needed to authorize a user to create a table with a resource-based policy.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Providing support to accept BgpAsnExtended attribute", + "type": "api-change" + }, + { + "category": "``kafka``", + "description": "Adds ControllerNodeInfo in ListNodes response to support Raft mode for MSK", + "type": "api-change" + }, + { + "category": "``swf``", + "description": "This release adds new APIs for deleting activity type and workflow type resources.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.115.json b/.changes/1.34.115.json new file mode 100644 index 0000000000..a66e32cfa7 --- /dev/null +++ b/.changes/1.34.115.json @@ -0,0 +1,27 @@ +[ + { + "category": "``athena``", + "description": "Throwing validation errors on CreateNotebook with Name containing `/`,`:`,`\\`", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "AWS CodeBuild now supports manually creating GitHub webhooks", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "This release includes changes to DescribeContact API's response by including ConnectedToSystemTimestamp, RoutingCriteria, Customer, Campaign, AnsweringMachineDetectionStatus, CustomerVoiceActivity, QualityMetrics, DisconnectDetails, and SegmentAttributes information from a contact in Amazon Connect.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add optional field JobMode to CreateJob and UpdateJob APIs.", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "Add ROOT type for TargetType model", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.116.json b/.changes/1.34.116.json new file mode 100644 index 0000000000..c256a14397 --- /dev/null +++ b/.changes/1.34.116.json @@ -0,0 +1,42 @@ +[ + { + "category": "``acm``", + "description": "add v2 smoke tests and 
smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``bedrock-agent``", + "description": "With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2.", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "This release adds Converse and ConverseStream APIs to Bedrock Runtime", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "The release adds support for spark structured streaming.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for Aurora Postgres DBname.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Adds Model Card information as a new component to Model Package. 
Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.117.json b/.changes/1.34.117.json new file mode 100644 index 0000000000..abbe7aba70 --- /dev/null +++ b/.changes/1.34.117.json @@ -0,0 +1,22 @@ +[ + { + "category": "``codebuild``", + "description": "AWS CodeBuild now supports Self-hosted GitHub Actions runners for Github Enterprise", + "type": "api-change" + }, + { + "category": "``codeguru-security``", + "description": "This release includes minor model updates and documentation updates.", + "type": "api-change" + }, + { + "category": "``elasticache``", + "description": "Update to attributes of TestFailover and minor revisions.", + "type": "api-change" + }, + { + "category": "``launch-wizard``", + "description": "This release adds support for describing workload deployment specifications, deploying additional workload types, and managing tags for Launch Wizard resources with API operations.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.118.json b/.changes/1.34.118.json new file mode 100644 index 0000000000..280f1d1025 --- /dev/null +++ b/.changes/1.34.118.json @@ -0,0 +1,22 @@ +[ + { + "category": "``amplify``", + "description": "This doc-only update identifies fields that are specific to Gen 1 and Gen 2 applications.", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "This release adds support for the AWS Batch GetJobQueueSnapshot API operation.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Adds support for EKS add-ons pod identity associations integration", + "type": "api-change" + }, + { + "category": "``iottwinmaker``", + "description": "Support RESET_VALUE UpdateType for PropertyUpdates to reset property value to default or null", + "type": "api-change" + } +] \ No newline at end of file diff --git 
a/.changes/1.34.119.json b/.changes/1.34.119.json new file mode 100644 index 0000000000..2c5fcc1c7b --- /dev/null +++ b/.changes/1.34.119.json @@ -0,0 +1,22 @@ +[ + { + "category": "``ec2``", + "description": "U7i instances with up to 32 TiB of DDR5 memory and 896 vCPUs are now available. C7i-flex instances are launched and are lower-priced variants of the Amazon EC2 C7i instances that offer a baseline level of CPU performance with the ability to scale up to the full compute performance 95% of the time.", + "type": "api-change" + }, + { + "category": "``pipes``", + "description": "This release adds Timestream for LiveAnalytics as a supported target in EventBridge Pipes", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Extend DescribeClusterNode response with private DNS hostname and IP address, and placement information about availability zone and availability zone ID.", + "type": "api-change" + }, + { + "category": "``taxsettings``", + "description": "Initial release of AWS Tax Settings API", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.120.json b/.changes/1.34.120.json new file mode 100644 index 0000000000..97b8f00d41 --- /dev/null +++ b/.changes/1.34.120.json @@ -0,0 +1,17 @@ +[ + { + "category": "``globalaccelerator``", + "description": "This release contains a new optional ip-addresses input field for the update accelerator and update custom routing accelerator apis. This input enables consumers to replace IPv4 addresses on existing accelerators with addresses provided in the input.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "AWS Glue now supports native SaaS connectivity: Salesforce connector available now", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. 
These changes will not change any of the existing S3 API functionality.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.121.json b/.changes/1.34.121.json new file mode 100644 index 0000000000..3d4995b4ce --- /dev/null +++ b/.changes/1.34.121.json @@ -0,0 +1,62 @@ +[ + { + "category": "``account``", + "description": "This release adds 3 new APIs (AcceptPrimaryEmailUpdate, GetPrimaryEmail, and StartPrimaryEmailUpdate) used to centrally manage the root user email address of member accounts within an AWS organization.", + "type": "api-change" + }, + { + "category": "``alexaforbusiness``", + "description": "The alexaforbusiness client has been removed following the deprecation of the service.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "Adds integration with Secrets Manager for Redshift, Splunk, HttpEndpoint, and Snowflake destinations", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "This release adds support to increase metadata performance on FSx for Lustre file systems beyond the default level provisioned when a file system is created. This can be done by specifying MetadataConfiguration during the creation of Persistent_2 file systems or by updating it on demand.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "This release adds support for creating and updating Glue Data Catalog Views.", + "type": "api-change" + }, + { + "category": "``honeycode``", + "description": "The honeycode client has been removed following the deprecation of the service.", + "type": "api-change" + }, + { + "category": "``iotwireless``", + "description": "Adds support for wireless device to be in Conflict FUOTA Device Status due to a FUOTA Task, so it couldn't be attached to a new one.", + "type": "api-change" + }, + { + "category": "``location``", + "description": "Added two new APIs, VerifyDevicePosition and ForecastGeofenceEvents. 
Added support for putting larger geofences up to 100,000 vertices with Geobuf fields.", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "Doc-only update for SNS. These changes include customer-reported issues and TXC3 updates.", + "type": "api-change" + }, + { + "category": "``sqs``", + "description": "Doc only updates for SQS. These updates include customer-reported issues and TCX3 modifications.", + "type": "api-change" + }, + { + "category": "``storagegateway``", + "description": "Adds SoftwareUpdatePreferences to DescribeMaintenanceStartTime and UpdateMaintenanceStartTime, a structure which contains AutomaticUpdatePolicy.", + "type": "api-change" + }, + { + "category": "AWSCRT", + "description": "Update awscrt version to 0.20.11", + "type": "enhancement" + } +] \ No newline at end of file diff --git a/.changes/1.34.122.json b/.changes/1.34.122.json new file mode 100644 index 0000000000..211f72fb64 --- /dev/null +++ b/.changes/1.34.122.json @@ -0,0 +1,27 @@ +[ + { + "category": "``auditmanager``", + "description": "New feature: common controls. When creating custom controls, you can now use pre-grouped AWS data sources based on common compliance themes. Also, the awsServices parameter is deprecated because we now manage services in scope for you. If used, the input is ignored and an empty list is returned.", + "type": "api-change" + }, + { + "category": "``b2bi``", + "description": "Added exceptions to B2Bi List operations and ConflictException to B2Bi StartTransformerJob operation. 
Also made capabilities field explicitly required when creating a Partnership.", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "CodePipeline now supports overriding S3 Source Object Key during StartPipelineExecution, as part of Source Overrides.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release introduces a new optional parameter: InferenceAmiVersion, in ProductionVariant.", + "type": "api-change" + }, + { + "category": "``verifiedpermissions``", + "description": "This release adds OpenIdConnect (OIDC) configuration support for IdentitySources, allowing for external IDPs to be used in authorization requests.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.123.json b/.changes/1.34.123.json new file mode 100644 index 0000000000..5e768982a8 --- /dev/null +++ b/.changes/1.34.123.json @@ -0,0 +1,17 @@ +[ + { + "category": "``application-signals``", + "description": "This is the initial SDK release for Amazon CloudWatch Application Signals. 
Amazon CloudWatch Application Signals provides curated application performance monitoring for developers to monitor and troubleshoot application health using pre-built dashboards and Service Level Objectives.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release introduces a new cluster configuration to support the customer-managed keys for ECS managed storage encryption.", + "type": "api-change" + }, + { + "category": "``imagebuilder``", + "description": "This release updates the regex pattern for Image Builder ARNs.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.124.json b/.changes/1.34.124.json new file mode 100644 index 0000000000..0144a29417 --- /dev/null +++ b/.changes/1.34.124.json @@ -0,0 +1,27 @@ +[ + { + "category": "``accessanalyzer``", + "description": "IAM Access Analyzer now provides policy recommendations to help resolve unused permissions for IAM roles and users. Additionally, IAM Access Analyzer now extends its custom policy checks to detect when IAM policies grant public access or access to critical resources ahead of deployments.", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Added API support for GuardDuty Malware Protection for S3.", + "type": "api-change" + }, + { + "category": "``networkmanager``", + "description": "This is model changes & documentation update for Service Insertion feature for AWS Cloud WAN. This feature allows insertion of AWS/3rd party security services on Cloud WAN. This allows to steer inter/intra segment traffic via security appliances and provide visibility to the route updates.", + "type": "api-change" + }, + { + "category": "``pca-connector-scep``", + "description": "Connector for SCEP allows you to use a managed, cloud CA to enroll mobile devices and networking gear. SCEP is a widely-adopted protocol used by mobile device management (MDM) solutions for enrolling mobile devices. 
With the connector, you can use AWS Private CA with popular MDM solutions.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Introduced Scope and AuthenticationRequestExtraParams to SageMaker Workforce OIDC configuration; this allows customers to modify these options for their private Workforce IdP integration. Model Registry Cross-account model package groups are discoverable.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.125.json b/.changes/1.34.125.json new file mode 100644 index 0000000000..05eaa5781d --- /dev/null +++ b/.changes/1.34.125.json @@ -0,0 +1,42 @@ +[ + { + "category": "``apptest``", + "description": "AWS Mainframe Modernization Application Testing is an AWS Mainframe Modernization service feature that automates functional equivalence testing for mainframe application modernization and migration to AWS, and regression testing.", + "type": "api-change" + }, + { + "category": "``backupstorage``", + "description": "The backupstorage client has been removed following the deprecation of the service.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Tagging support for Traffic Mirroring FilterRule resource", + "type": "api-change" + }, + { + "category": "``osis``", + "description": "SDK changes for self-managed vpc endpoint to OpenSearch ingestion pipelines.", + "type": "api-change" + }, + { + "category": "``redshift``", + "description": "Updates to remove DC1 and DS2 node types.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Introducing RotationToken parameter for PutSecretValue API", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "This release updates request validation regex to account for non-commercial aws partitions.", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "This release adds support for Amazon EventBridge as an email sending events 
destination.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.126.json b/.changes/1.34.126.json new file mode 100644 index 0000000000..f2f6b12bad --- /dev/null +++ b/.changes/1.34.126.json @@ -0,0 +1,27 @@ +[ + { + "category": "``cloudhsmv2``", + "description": "Added support for hsm type hsm2m.medium. Added supported for creating a cluster in FIPS or NON_FIPS mode.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "This release adds support for configuration of evaluation method for composite rules in Glue Data Quality rulesets.", + "type": "api-change" + }, + { + "category": "``iotwireless``", + "description": "Add RoamingDeviceSNR and RoamingDeviceRSSI to Customer Metrics.", + "type": "api-change" + }, + { + "category": "``kms``", + "description": "This feature allows customers to use their keys stored in KMS to derive a shared secret which can then be used to establish a secured channel for communication, provide proof of possession, or establish trust with other parties.", + "type": "api-change" + }, + { + "category": "``mediapackagev2``", + "description": "This release adds support for CMAF ingest (DASH-IF live media ingest protocol interface 1)", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.127.json b/.changes/1.34.127.json new file mode 100644 index 0000000000..bf1b45b7bc --- /dev/null +++ b/.changes/1.34.127.json @@ -0,0 +1,27 @@ +[ + { + "category": "``datazone``", + "description": "This release introduces a new default service blueprint for custom environment creation.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Documentation updates for Amazon EC2.", + "type": "api-change" + }, + { + "category": "``macie2``", + "description": "This release adds support for managing the status of automated sensitive data discovery for individual accounts in an organization, and determining whether individual S3 buckets are included in 
the scope of the analyses.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release adds the ability to search for historical job records within the management console using a search box and/or via the SDK/CLI with partial string matching search on input file name.", + "type": "api-change" + }, + { + "category": "``route53domains``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.128.json b/.changes/1.34.128.json new file mode 100644 index 0000000000..123c80200a --- /dev/null +++ b/.changes/1.34.128.json @@ -0,0 +1,52 @@ +[ + { + "category": "``acm-pca``", + "description": "Doc-only update that adds name constraints as an allowed extension for ImportCertificateAuthorityCertificate.", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "AWS CodeBuild now supports global and organization GitHub webhooks", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``ds``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``efs``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "This release introduces a new feature, Usage profiles. 
Usage profiles allow the AWS Glue admin to create different profiles for various classes of users within the account, enforcing limits and defaults for jobs and sessions.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release includes support for creating I-frame only video segments for DASH trick play.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``waf``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.129.json b/.changes/1.34.129.json new file mode 100644 index 0000000000..a2c2ddc2d8 --- /dev/null +++ b/.changes/1.34.129.json @@ -0,0 +1,52 @@ +[ + { + "category": "``bedrock-runtime``", + "description": "This release adds support for using Guardrails with the Converse and ConverseStream APIs.", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``config``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "This release adds support to surface async fargate customer errors from async path to customer through describe-fargate-profile API response.", + "type": "api-change" + }, + { + "category": "``lightsail``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``polly``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``rekognition``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": 
"``sagemaker``", + "description": "Launched a new feature in SageMaker to provide managed MLflow Tracking Servers for customers to track ML experiments. This release also adds a new capability of attaching additional storage to SageMaker HyperPod cluster instances.", + "type": "api-change" + }, + { + "category": "``shield``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``snowball``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.130.json b/.changes/1.34.130.json new file mode 100644 index 0000000000..4ca212f92a --- /dev/null +++ b/.changes/1.34.130.json @@ -0,0 +1,32 @@ +[ + { + "category": "``artifact``", + "description": "This release adds an acceptanceType field to the ReportSummary structure (used in the ListReports API response).", + "type": "api-change" + }, + { + "category": "``athena``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``cur``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``directconnect``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``elastictranscoder``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release enables customers to use JSON Web Tokens (JWT) for authentication on their Amazon OpenSearch Service domains.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.131.json b/.changes/1.34.131.json new file mode 100644 index 0000000000..bb7c0df1ae --- /dev/null +++ b/.changes/1.34.131.json @@ -0,0 +1,47 @@ +[ + { + 
"category": "``bedrock-runtime``", + "description": "This release adds document support to Converse and ConverseStream APIs", + "type": "api-change" + }, + { + "category": "``codeartifact``", + "description": "Add support for the Cargo package format.", + "type": "api-change" + }, + { + "category": "``compute-optimizer``", + "description": "This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for Amazon RDS MySQL and RDS PostgreSQL.", + "type": "api-change" + }, + { + "category": "``cost-optimization-hub``", + "description": "This release enables AWS Cost Optimization Hub to show cost optimization recommendations for Amazon RDS MySQL and RDS PostgreSQL.", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "Doc-only update for DynamoDB. Fixed Important note in 6 Global table APIs - CreateGlobalTable, DescribeGlobalTable, DescribeGlobalTableSettings, ListGlobalTables, UpdateGlobalTable, and UpdateGlobalTableSettings.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Fix Glue paginators for Jobs, JobRuns, Triggers, Blueprints and Workflows.", + "type": "api-change" + }, + { + "category": "``ivs-realtime``", + "description": "IVS Real-Time now offers customers the ability to record individual stage participants to S3.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Adds support for model references in Hub service, and adds support for cross-account access of Hubs", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "Documentation updates for Security Hub", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.132.json b/.changes/1.34.132.json new file mode 100644 index 0000000000..13608c1230 --- /dev/null +++ b/.changes/1.34.132.json @@ -0,0 +1,32 @@ +[ + { + "category": "``bedrock-runtime``", + "description": "Increases Converse API's document name length", + "type": 
"api-change" + }, + { + "category": "``customer-profiles``", + "description": "This release includes changes to ProfileObjectType APIs, adds functionality to set and get capacity for profile object types.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Fix EC2 multi-protocol info in models.", + "type": "api-change" + }, + { + "category": "``qbusiness``", + "description": "Allow enable/disable Q Apps when creating/updating a Q application; Return the Q Apps enablement information when getting a Q application.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Add sensitive trait to SSM IPAddress property for CloudTrail redaction", + "type": "api-change" + }, + { + "category": "``workspaces-web``", + "description": "Added ability to enable DeepLinking functionality on a Portal via UserSettings as well as added support for IdentityProvider resource tagging.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.133.json b/.changes/1.34.133.json new file mode 100644 index 0000000000..dc596151df --- /dev/null +++ b/.changes/1.34.133.json @@ -0,0 +1,22 @@ +[ + { + "category": "``autoscaling``", + "description": "Doc only update for Auto Scaling's TargetTrackingMetricDataQuery", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release is for the launch of the new u7ib-12tb.224xlarge, R8g, c7gn.metal and mac2-m1ultra.metal instance types", + "type": "api-change" + }, + { + "category": "``networkmanager``", + "description": "This is model changes & documentation update for the Asynchronous Error Reporting feature for AWS Cloud WAN. 
This feature allows customers to view errors that occur while their resources are being provisioned, enabling customers to fix their resources without needing external support.", + "type": "api-change" + }, + { + "category": "``workspaces-thin-client``", + "description": "This release adds the deviceCreationTags field to CreateEnvironment API input, UpdateEnvironment API input and GetEnvironment API output.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.134.json b/.changes/1.34.134.json new file mode 100644 index 0000000000..ddc52d381f --- /dev/null +++ b/.changes/1.34.134.json @@ -0,0 +1,27 @@ +[ + { + "category": "``controltower``", + "description": "Added ListLandingZoneOperations API.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Added support for disabling unmanaged addons during cluster creation.", + "type": "api-change" + }, + { + "category": "``ivs-realtime``", + "description": "IVS Real-Time now offers customers the ability to upload public keys for customer vended participant tokens.", + "type": "api-change" + }, + { + "category": "``kinesisanalyticsv2``", + "description": "This release adds support for new ListApplicationOperations and DescribeApplicationOperation APIs. 
It adds a new configuration to enable system rollbacks, adds field ApplicationVersionCreateTimestamp for clarity and improves support for pagination for APIs.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.135.json b/.changes/1.34.135.json new file mode 100644 index 0000000000..a2071cef60 --- /dev/null +++ b/.changes/1.34.135.json @@ -0,0 +1,57 @@ +[ + { + "category": "``application-autoscaling``", + "description": "Amazon WorkSpaces customers can now use Application Auto Scaling to automatically scale the number of virtual desktops in a WorkSpaces pool.", + "type": "api-change" + }, + { + "category": "``chime-sdk-media-pipelines``", + "description": "Added Amazon Transcribe multi language identification to Chime SDK call analytics. Enabling customers sending single stream audio to generate call recordings using Chime SDK call analytics", + "type": "api-change" + }, + { + "category": "``cloudfront``", + "description": "Doc only update for CloudFront that fixes customer-reported issue", + "type": "api-change" + }, + { + "category": "``datazone``", + "description": "This release supports the data lineage feature of business data catalog in Amazon DataZone.", + "type": "api-change" + }, + { + "category": "``elasticache``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``mq``", + "description": "This release makes the EngineVersion field optional for both broker and configuration and uses the latest available version by default. 
The AutoMinorVersionUpgrade field is also now optional for broker creation and defaults to 'true'.", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "Adds CreateContentAssociation, ListContentAssociations, GetContentAssociation, and DeleteContentAssociation APIs.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Adding support for Repeating Sections, Nested Filters", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for TAZ export to S3.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Add capability for Admins to customize Studio experience for the user by showing or hiding Apps and MLTools.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added support for WorkSpaces Pools.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.136.json b/.changes/1.34.136.json new file mode 100644 index 0000000000..b1e0c5319a --- /dev/null +++ b/.changes/1.34.136.json @@ -0,0 +1,47 @@ +[ + { + "category": "``acm-pca``", + "description": "Added CCPC_LEVEL_1_OR_HIGHER KeyStorageSecurityStandard and SM2 KeyAlgorithm and SM3WITHSM2 SigningAlgorithm for China regions.", + "type": "api-change" + }, + { + "category": "``cloudhsmv2``", + "description": "Added 3 new APIs to support backup sharing: GetResourcePolicy, PutResourcePolicy, and DeleteResourcePolicy. Added BackupArn to the output of the DescribeBackups API. Added support for BackupArn in the CreateCluster API.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "This release supports showing PreferredAgentRouting step via DescribeContact API.", + "type": "api-change" + }, + { + "category": "``emr``", + "description": "This release provides the support for new allocation strategies i.e. 
CAPACITY_OPTIMIZED_PRIORITIZED for Spot and PRIORITIZED for On-Demand by taking input of priority value for each instance type for instance fleet clusters.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Added AttributesToGet parameter to Glue GetDatabases, allowing caller to limit output to include only the database name.", + "type": "api-change" + }, + { + "category": "``kinesisanalyticsv2``", + "description": "Support for Flink 1.19 in Managed Service for Apache Flink", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release removes support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains.", + "type": "api-change" + }, + { + "category": "``pi``", + "description": "Noting that the filter db.sql.db_id isn't available for RDS for SQL Server DB instances.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added support for Red Hat Enterprise Linux 8 on Amazon WorkSpaces Personal.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.137.json b/.changes/1.34.137.json new file mode 100644 index 0000000000..f2d23906e7 --- /dev/null +++ b/.changes/1.34.137.json @@ -0,0 +1,52 @@ +[ + { + "category": "``apigateway``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``cognito-identity``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Authentication profiles are Amazon Connect resources (in gated preview) that allow you to configure authentication settings for users in your contact center. 
This release adds support for new ListAuthenticationProfiles, DescribeAuthenticationProfile and UpdateAuthenticationProfile APIs.", + "type": "api-change" + }, + { + "category": "``docdb``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Updates EKS managed node groups to support EC2 Capacity Blocks for ML", + "type": "api-change" + }, + { + "category": "``payment-cryptography``", + "description": "Added further restrictions on logging of potentially sensitive inputs and outputs.", + "type": "api-change" + }, + { + "category": "``payment-cryptography-data``", + "description": "Adding support for dynamic keys for encrypt, decrypt, re-encrypt and translate pin functions. With this change, customers can use one-time TR-31 keys directly in dataplane operations without the need to first import them into the service.", + "type": "api-change" + }, + { + "category": "``stepfunctions``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``swf``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``wafv2``", + "description": "JSON body inspection: Update documentation to clarify that JSON parsing doesn't include full validation.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.138.json b/.changes/1.34.138.json new file mode 100644 index 0000000000..2d5a97ed61 --- /dev/null +++ b/.changes/1.34.138.json @@ -0,0 +1,17 @@ +[ + { + "category": "``ec2``", + "description": "Documentation updates for Elastic Compute Cloud (EC2).", + "type": "api-change" + }, + { + "category": "``fms``", + "description": "Increases Customer API's ManagedServiceData length", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Added response overrides to Head Object 
requests.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.139.json b/.changes/1.34.139.json new file mode 100644 index 0000000000..1f9ab9a47f --- /dev/null +++ b/.changes/1.34.139.json @@ -0,0 +1,27 @@ +[ + { + "category": "``application-autoscaling``", + "description": "Doc only update for Application Auto Scaling that fixes resource name.", + "type": "api-change" + }, + { + "category": "``directconnect``", + "description": "This update includes documentation for support of new native 400 GBps ports for Direct Connect.", + "type": "api-change" + }, + { + "category": "``organizations``", + "description": "Added a new reason under ConstraintViolationException in RegisterDelegatedAdministrator API to prevent registering suspended accounts as delegated administrator of a service.", + "type": "api-change" + }, + { + "category": "``rekognition``", + "description": "This release adds support for tagging projects and datasets with the CreateProject and CreateDataset APIs.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Fix create workspace bundle RootStorage/UserStorage to accept non null values", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.140.json b/.changes/1.34.140.json new file mode 100644 index 0000000000..05d5e01a91 --- /dev/null +++ b/.changes/1.34.140.json @@ -0,0 +1,22 @@ +[ + { + "category": "``acm``", + "description": "Documentation updates, including fixes for xml formatting, broken links, and ListCertificates description.", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "This release for Amazon ECR makes change to bring the SDK into sync with the API.", + "type": "api-change" + }, + { + "category": "``payment-cryptography-data``", + "description": "Added further restrictions on logging of potentially sensitive inputs and outputs.", + "type": "api-change" + }, + { + "category": "``qbusiness``", + "description": "Add 
personalization to Q Applications. Customers can enable or disable personalization when creating or updating a Q application with the personalization configuration.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.141.json b/.changes/1.34.141.json new file mode 100644 index 0000000000..385d826979 --- /dev/null +++ b/.changes/1.34.141.json @@ -0,0 +1,52 @@ +[ + { + "category": "``codedeploy``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``devicefarm``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``dms``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``elasticbeanstalk``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``es``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``gamelift``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``qapps``", + "description": "This is a general availability (GA) release of Amazon Q Apps, a capability of Amazon Q Business. 
Q Apps leverages data sources your company has provided to enable users to build, share, and customize apps within your organization.", + "type": "api-change" + }, + { + "category": "``route53resolver``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``ses``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.142.json b/.changes/1.34.142.json new file mode 100644 index 0000000000..35ab99cbfa --- /dev/null +++ b/.changes/1.34.142.json @@ -0,0 +1,22 @@ +[ + { + "category": "``datazone``", + "description": "This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 
3/ enables AdditionalModelDataSources for CreateModel action.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.143.json b/.changes/1.34.143.json new file mode 100644 index 0000000000..432c34ff85 --- /dev/null +++ b/.changes/1.34.143.json @@ -0,0 +1,52 @@ +[ + { + "category": "``batch``", + "description": "This feature allows AWS Batch Jobs with EKS container orchestration type to be run as Multi-Node Parallel Jobs.", + "type": "api-change" + }, + { + "category": "``bedrock``", + "description": "Add support for contextual grounding check for Guardrails for Amazon Bedrock.", + "type": "api-change" + }, + { + "category": "``bedrock-agent``", + "description": "Introduces new data sources and chunking strategies for Knowledge bases, advanced parsing logic using FMs, session summary generation, and code interpretation (preview) for Claude V3 Sonnet and Haiku models. Also introduces Prompt Flows (preview) to link prompts, foundational models, and resources.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "Introduces query decomposition, enhanced Agents integration with Knowledge bases, session summary generation, and code interpretation (preview) for Claude V3 Sonnet and Haiku models. 
Also introduces Prompt Flows (preview) to link prompts, foundational models, and resources for end-to-end solutions.", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Add support for contextual grounding check and ApplyGuardrail API for Guardrails for Amazon Bedrock.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Add parameters to enable provisioning IPAM BYOIPv4 space at a Local Zone Network Border Group level", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add recipe step support for recipe node", + "type": "api-change" + }, + { + "category": "``groundstation``", + "description": "Documentation update specifying OEM ephemeris units of measurement", + "type": "api-change" + }, + { + "category": "``license-manager-linux-subscriptions``", + "description": "Add support for third party subscription providers, starting with RHEL subscriptions through Red Hat Subscription Manager (RHSM). Additionally, add support for tagging subscription provider resources, and detect when an instance has more than one Linux subscription and notify the customer.", + "type": "api-change" + }, + { + "category": "``mediaconnect``", + "description": "AWS Elemental MediaConnect introduces the ability to disable outputs. Disabling an output allows you to keep the output attached to the flow, but stop streaming to the output destination. 
A disabled output does not incur data transfer costs.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.144.json b/.changes/1.34.144.json new file mode 100644 index 0000000000..f0fc3b2a4c --- /dev/null +++ b/.changes/1.34.144.json @@ -0,0 +1,27 @@ +[ + { + "category": "``acm-pca``", + "description": "Minor refactoring of C2J model for AWS Private CA", + "type": "api-change" + }, + { + "category": "``arc-zonal-shift``", + "description": "Adds the option to subscribe to get notifications when a zonal autoshift occurs in a region.", + "type": "api-change" + }, + { + "category": "``globalaccelerator``", + "description": "This feature adds exceptions to the Customer API to avoid throwing Internal Service errors", + "type": "api-change" + }, + { + "category": "``pinpoint``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Vega ally control options and Support for Reviewed Answers in Topics", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.145.json b/.changes/1.34.145.json new file mode 100644 index 0000000000..5b8643f0e1 --- /dev/null +++ b/.changes/1.34.145.json @@ -0,0 +1,67 @@ +[ + { + "category": "``acm-pca``", + "description": "Fix broken waiters for the acm-pca client. Waiters broke in version 1.13.144 of the Boto3 SDK.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Amazon Connect expands search API coverage for additional resources. Search for hierarchy groups by name, ID, tag, or other criteria (new endpoint). Search for agent statuses by name, ID, tag, or other criteria (new endpoint). 
Search for users by their assigned proficiencies (enhanced endpoint)", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Amazon VPC IP Address Manager (IPAM) now supports Bring-Your-Own-IP (BYOIP) for IP addresses registered with any Internet Registry. This feature uses DNS TXT records to validate ownership of a public IP address range.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "This release 1) Add configurable buffering hints for Snowflake as destination. 2) Add ReadFromTimestamp for MSK As Source. Firehose will start reading data from MSK Cluster using offset associated with this timestamp. 3) Gated public beta release to add Apache Iceberg tables as destination.", + "type": "api-change" + }, + { + "category": "``ivschat``", + "description": "Documentation update for IVS Chat API Reference.", + "type": "api-change" + }, + { + "category": "``medialive``", + "description": "AWS Elemental MediaLive now supports the SRT protocol via the new SRT Caller input type.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation to specify an eventual consistency model for DescribePendingMaintenanceActions.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "SageMaker Training supports R5, T3 and R5D instances family. And SageMaker Processing supports G5 and R5D instances family.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``taxsettings``", + "description": "Set default endpoint for aws partition. Requests from all regions in aws partition will be forward to us-east-1 endpoint.", + "type": "api-change" + }, + { + "category": "``timestream-query``", + "description": "Doc-only update for TimestreamQuery. 
Added guidance about the accepted valid value for the QueryPricingModel parameter.", + "type": "api-change" + }, + { + "category": "``workspaces-thin-client``", + "description": "Documentation update for WorkSpaces Thin Client.", + "type": "api-change" + }, + { + "category": "Waiter", + "description": "Update waiters to handle expected boolean values when matching errors (`boto/botocore#3220 <https://github.com/boto/botocore/issues/3220>`__)", + "type": "bugfix" + } +] \ No newline at end of file diff --git a/.changes/1.34.146.json b/.changes/1.34.146.json new file mode 100644 index 0000000000..6cc88fc453 --- /dev/null +++ b/.changes/1.34.146.json @@ -0,0 +1,27 @@ +[ + { + "category": "``datazone``", + "description": "This release adds 1/ support of register S3 locations of assets in AWS Lake Formation hybrid access mode for DefaultDataLake blueprint. 2/ support of CRUD operations for Asset Filters.", + "type": "api-change" + }, + { + "category": "``ivs``", + "description": "Documentation update for IVS Low Latency API Reference.", + "type": "api-change" + }, + { + "category": "``mobile``", + "description": "The mobile client has been removed following the deprecation of the service.", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Amazon Neptune Analytics provides new options for customers to start with smaller graphs at a lower cost. 
CreateGraph, CreateGraphImportTask, UpdateGraph and StartImportTask APIs will now allow 32 and 64 for `provisioned-memory`", + "type": "api-change" + }, + { + "category": "``redshift-serverless``", + "description": "Adds dualstack support for Redshift Serverless workgroup.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.147.json b/.changes/1.34.147.json new file mode 100644 index 0000000000..e3a1fa4d4e --- /dev/null +++ b/.changes/1.34.147.json @@ -0,0 +1,37 @@ +[ + { + "category": "``appsync``", + "description": "Adding support for paginators in AppSync list APIs", + "type": "api-change" + }, + { + "category": "``cleanrooms``", + "description": "This release adds AWS Entity Resolution integration to associate ID namespaces & ID mapping workflow resources as part of ID namespace association and ID mapping table in AWS Clean Rooms. It also introduces a new ID_MAPPING_TABLE analysis rule to manage the protection on ID mapping table.", + "type": "api-change" + }, + { + "category": "``cleanroomsml``", + "description": "Adds SQL query as the source of seed audience for audience generation job.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Added PostContactSummary segment type on ListRealTimeContactAnalysisSegmentsV2 API", + "type": "api-change" + }, + { + "category": "``connect-contact-lens``", + "description": "Added PostContactSummary segment type on ListRealTimeContactAnalysisSegments API", + "type": "api-change" + }, + { + "category": "``datazone``", + "description": "This release removes the deprecated dataProductItem field from Search API output.", + "type": "api-change" + }, + { + "category": "``entityresolution``", + "description": "Support First Party ID Mapping", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.148.json b/.changes/1.34.148.json new file mode 100644 index 0000000000..b21c0f1964 --- /dev/null +++ b/.changes/1.34.148.json @@ -0,0 +1,32 
@@ +[ + { + "category": "``cleanrooms``", + "description": "Three enhancements to the AWS Clean Rooms: Disallowed Output Columns, Flexible Result Receivers, SQL as a Seed", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "DynamoDB doc only update for July", + "type": "api-change" + }, + { + "category": "``iotsitewise``", + "description": "Adds support for creating SiteWise Edge gateways that run on a Siemens Industrial Edge Device.", + "type": "api-change" + }, + { + "category": "``mediapackagev2``", + "description": "This release adds support for Irdeto DRM encryption in DASH manifests.", + "type": "api-change" + }, + { + "category": "``medical-imaging``", + "description": "CopyImageSet API adds copying selected instances between image sets, and overriding inconsistent metadata with a force parameter. UpdateImageSetMetadata API enables reverting to prior versions; updates to Study, Series, and SOP Instance UIDs; and updates to private elements, with a force parameter.", + "type": "api-change" + }, + { + "category": "``pinpoint-sms-voice-v2``", + "description": "Update for rebrand to AWS End User Messaging SMS and Voice.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.149.json b/.changes/1.34.149.json new file mode 100644 index 0000000000..fc8c3beefa --- /dev/null +++ b/.changes/1.34.149.json @@ -0,0 +1,62 @@ +[ + { + "category": "``application-autoscaling``", + "description": "Application Auto Scaling is now more responsive to the changes in demand of your SageMaker Inference endpoints. To get started, create or update a Target Tracking policy based on High Resolution CloudWatch metrics.", + "type": "api-change" + }, + { + "category": "``application-signals``", + "description": "CloudWatch Application Signals now supports application logs correlation with traces and operational health metrics of applications running on EC2 instances. 
Users can view the most relevant telemetry to troubleshoot application health anomalies such as spikes in latency, errors, and availability.", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Provides ServiceUnavailableException error message", + "type": "api-change" + }, + { + "category": "``codecommit``", + "description": "CreateRepository API now throws OperationNotAllowedException when the account has been restricted from creating a repository.", + "type": "api-change" + }, + { + "category": "``datazone``", + "description": "Introduces GetEnvironmentCredentials operation to SDK", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "EC2 Fleet now supports using custom identifiers to reference Amazon Machine Images (AMI) in launch requests that are configured to choose from a diversified list of instance types.", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "API and documentation updates for Amazon ECR, adding support for creating, updating, describing and deleting ECR Repository Creation Template.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "This release adds support for EKS cluster to manage extended support.", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "This release adds support for sharing trust stores across accounts and organizations through integration with AWS Resource Access Manager.", + "type": "api-change" + }, + { + "category": "``network-firewall``", + "description": "You can now log events that are related to TLS inspection, in addition to the existing alert and flow logging.", + "type": "api-change" + }, + { + "category": "``outposts``", + "description": "Adding default vCPU information to GetOutpostSupportedInstanceTypes and GetOutpostInstanceTypes responses", + "type": "api-change" + }, + { + "category": "``stepfunctions``", + "description": "This release adds support to customer managed KMS key 
encryption in AWS Step Functions.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.150.json b/.changes/1.34.150.json new file mode 100644 index 0000000000..a108a41228 --- /dev/null +++ b/.changes/1.34.150.json @@ -0,0 +1,12 @@ +[ + { + "category": "``elasticache``", + "description": "Renaming full service name as it appears in developer documentation.", + "type": "api-change" + }, + { + "category": "``memorydb``", + "description": "Renaming full service name as it appears in developer documentation.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.151.json b/.changes/1.34.151.json new file mode 100644 index 0000000000..a6b685f80c --- /dev/null +++ b/.changes/1.34.151.json @@ -0,0 +1,57 @@ +[ + { + "category": "``appstream``", + "description": "Added support for Red Hat Enterprise Linux 8 on Amazon AppStream 2.0", + "type": "api-change" + }, + { + "category": "``autoscaling``", + "description": "Increase the length limit for VPCZoneIdentifier from 2047 to 5000", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "AWS CodePipeline V2 type pipelines now support stage level conditions to enable development teams to safely release changes that meet quality and compliance requirements.", + "type": "api-change" + }, + { + "category": "``elasticache``", + "description": "Doc only update for changes to deletion API.", + "type": "api-change" + }, + { + "category": "``elb``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``events``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``lexv2-models``", + "description": "This release adds new capabilities to the AMAZON.QnAIntent: Custom prompting, Guardrails integration and ExactResponse support for Bedrock Knowledge Base.", + "type": "api-change" + }, + { + 
"category": "``logs``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``rolesanywhere``", + "description": "IAM RolesAnywhere now supports custom role session name on the CreateSession. This release adds the acceptRoleSessionName option to a profile to control whether a role session name will be accepted in a session request with a given profile.", + "type": "api-change" + }, + { + "category": "``tnb``", + "description": "This release adds Network Service Update, through which customers will be able to update their instantiated networks to a new network package. See the documentation for limitations. The release also enhances the Get network operation API to return parameter overrides used during the operation.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Removing multi-session as it isn't supported for pools", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.152.json b/.changes/1.34.152.json new file mode 100644 index 0000000000..5b8075bf57 --- /dev/null +++ b/.changes/1.34.152.json @@ -0,0 +1,47 @@ +[ + { + "category": "``bedrock``", + "description": "API and Documentation for Bedrock Model Copy feature. 
This feature lets you share and copy a custom model from one region to another or one account to another.", + "type": "api-change" + }, + { + "category": "``controlcatalog``", + "description": "AWS Control Tower provides two new public APIs controlcatalog:ListControls and controlcatalog:GetControl under controlcatalog service namespace, which enable customers to programmatically retrieve control metadata of available controls.", + "type": "api-change" + }, + { + "category": "``controltower``", + "description": "Updated Control Tower service documentation for controlcatalog control ARN support with existing Control Tower public APIs", + "type": "api-change" + }, + { + "category": "``iam``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``memorydb``", + "description": "Doc only update for changes to deletion API.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "This release adds support for specifying optional MinACU parameter in CreateDBShardGroup and ModifyDBShardGroup API. 
DBShardGroup response will contain MinACU if specified.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release adds support for Amazon EMR Serverless applications in SageMaker Studio for running data processing jobs.", + "type": "api-change" + }, + { + "category": "``ssm-quicksetup``", + "description": "This release adds API support for the QuickSetup feature of AWS Systems Manager", + "type": "api-change" + }, + { + "category": "``support``", + "description": "Doc only updates to CaseDetails", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.153.json b/.changes/1.34.153.json new file mode 100644 index 0000000000..232e845ab0 --- /dev/null +++ b/.changes/1.34.153.json @@ -0,0 +1,27 @@ +[ + { + "category": "``cloudwatch``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``kinesis``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``resiliencehub``", + "description": "Customers are presented with the grouping recommendations and can determine if the recommendations are accurate and apply to their case. This feature simplifies onboarding by organizing resources into appropriate AppComponents.", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``waf-regional``", + "description": "Add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.154.json b/.changes/1.34.154.json new file mode 100644 index 0000000000..e639f03e1e --- /dev/null +++ b/.changes/1.34.154.json @@ -0,0 +1,22 @@ +[ + { + "category": "``datazone``", + "description": "This releases Data Product feature. 
Data Products allow grouping data assets into cohesive, self-contained units for ease of publishing for data producers, and ease of finding and accessing for data consumers.", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "Released two new APIs along with documentation updates. The GetAccountSetting API is used to view the current basic scan type version setting for your registry, while the PutAccountSetting API is used to update the basic scan type version for your registry.", + "type": "api-change" + }, + { + "category": "``kinesis-video-webrtc-storage``", + "description": "Add JoinStorageSessionAsViewer API", + "type": "api-change" + }, + { + "category": "``pi``", + "description": "Added a description for the Dimension db.sql.tokenized_id on the DimensionGroup data type page.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.155.json b/.changes/1.34.155.json new file mode 100644 index 0000000000..4d8aac0997 --- /dev/null +++ b/.changes/1.34.155.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bedrock-agent-runtime``", + "description": "Introduce model invocation output traces for orchestration traces, which contain the model's raw response and usage.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "Advanced security feature updates to include password history and log export for Cognito user pools.", + "type": "api-change" + }, + { + "category": "``cost-optimization-hub``", + "description": "This release adds savings percentage support to the ListRecommendationSummaries API.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added support for BYOL_GRAPHICS_G4DN_WSP IngestionProcess", + "type": "api-change" + }, + { + "category": "AWSCRT", + "description": "Update awscrt version to 0.21.2", + "type": "enhancement" + } +] \ No newline at end of file diff --git a/.changes/1.34.156.json b/.changes/1.34.156.json new file mode 100644 index 
0000000000..a9ca9e241a --- /dev/null +++ b/.changes/1.34.156.json @@ -0,0 +1,12 @@ +[ + { + "category": "``appintegrations``", + "description": "Updated CreateDataIntegration and CreateDataIntegrationAssociation API to support bulk data export from Amazon Connect Customer Profiles to the customer S3 bucket.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Introducing AWS Glue Data Quality anomaly detection, a new functionality that uses ML-based solutions to detect data anomalies users have not explicitly defined rules for.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.157.json b/.changes/1.34.157.json new file mode 100644 index 0000000000..a6cc4b5e12 --- /dev/null +++ b/.changes/1.34.157.json @@ -0,0 +1,22 @@ +[ + { + "category": "``cognito-idp``", + "description": "Added support for threat protection for custom authentication in Amazon Cognito user pools.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "This release fixes a regression in number of access control tags that are allowed to be added to a security profile in Amazon Connect. You can now add up to four access control tags on a single security profile.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Launch of private IPv6 addressing for VPCs and Subnets. VPC IPAM supports the planning and monitoring of private IPv6 usage.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "This release adds support to retrieve the validation status when creating or updating Glue Data Catalog Views. 
Also added is support for BasicCatalogTarget partition keys.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.158.json b/.changes/1.34.158.json new file mode 100644 index 0000000000..21456a4c20 --- /dev/null +++ b/.changes/1.34.158.json @@ -0,0 +1,17 @@ +[ + { + "category": "``cognito-idp``", + "description": "Fixed a description of AdvancedSecurityAdditionalFlows in Amazon Cognito user pool configuration.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "This release supports adding RoutingCriteria via UpdateContactRoutingData public API.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Systems Manager doc-only updates for August 2024.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.159.json b/.changes/1.34.159.json new file mode 100644 index 0000000000..6bcd6a839c --- /dev/null +++ b/.changes/1.34.159.json @@ -0,0 +1,37 @@ +[ + { + "category": "``compute-optimizer``", + "description": "Doc only update for Compute Optimizer that fixes several customer-reported issues relating to ECS finding classifications", + "type": "api-change" + }, + { + "category": "``config``", + "description": "Documentation update for the OrganizationConfigRuleName regex pattern.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds new capabilities to manage On-Demand Capacity Reservations including the ability to split your reservation, move capacity between reservations, and modify the instance eligibility of your reservation.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Added support for new AL2023 GPU AMIs to the supported AMITypes.", + "type": "api-change" + }, + { + "category": "``groundstation``", + "description": "Updating documentation for OEMEphemeris to link to AWS Ground Station User Guide", + "type": "api-change" + }, + { + "category": "``medialive``", + "description": 
"AWS Elemental MediaLive now supports editing the PID values for a Multiplex.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Releasing large data support as part of CreateAutoMLJobV2 in SageMaker Autopilot and CreateDomain API for SageMaker Canvas.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.160.json b/.changes/1.34.160.json new file mode 100644 index 0000000000..e9d58feca7 --- /dev/null +++ b/.changes/1.34.160.json @@ -0,0 +1,27 @@ +[ + { + "category": "``amplify``", + "description": "Add a new field \"cacheConfig\" that enables users to configure the CDN cache settings for an App", + "type": "api-change" + }, + { + "category": "``appstream``", + "description": "This release includes following new APIs: CreateThemeForStack, DescribeThemeForStack, UpdateThemeForStack, DeleteThemeForStack to support custom branding programmatically.", + "type": "api-change" + }, + { + "category": "``fis``", + "description": "This release adds support for additional error information on experiment failure. It adds the error code, location, and account id on relevant failures to the GetExperiment and ListExperiment API responses.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add AttributesToGet parameter support for Glue GetTables", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Amazon Neptune Analytics provides a new option for customers to load data into a graph using the RDF (Resource Description Framework) NTRIPLES format. 
When loading NTRIPLES files, use the value `convertToIri` for the `blankNodeHandling` parameter.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.161.json b/.changes/1.34.161.json new file mode 100644 index 0000000000..1ecba0d08a --- /dev/null +++ b/.changes/1.34.161.json @@ -0,0 +1,7 @@ +[ + { + "category": "``codebuild``", + "description": "AWS CodeBuild now supports using Secrets Manager to store git credentials and using multiple source credentials in a single project.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.34.162.json b/.changes/1.34.162.json new file mode 100644 index 0000000000..03313bbe4a --- /dev/null +++ b/.changes/1.34.162.json @@ -0,0 +1,22 @@ +[ + { + "category": "``docdb``", + "description": "This release adds Global Cluster Failover capability which enables you to change your global cluster's primary AWS region, the region that serves writes, during a regional outage. Performing a failover action preserves your Global Cluster setup.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release introduces a new ContainerDefinition configuration to support the customer-managed keys for ECS container restart feature.", + "type": "api-change" + }, + { + "category": "``iam``", + "description": "Make the LastUsedDate field in the GetAccessKeyLastUsed response optional. This may break customers who only call the API for access keys with a valid LastUsedDate. 
This fixes a deserialization issue for access keys without a LastUsedDate, because the field was marked as required but could be null.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.35.0.json b/.changes/1.35.0.json new file mode 100644 index 0000000000..4e67543734 --- /dev/null +++ b/.changes/1.35.0.json @@ -0,0 +1,32 @@ +[ + { + "category": "``batch``", + "description": "Improvements of integration between AWS Batch and EC2.", + "type": "api-change" + }, + { + "category": "``inspector2``", + "description": "Update the correct format of key and values for resource tags", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Amazon QuickSight launches Customer Managed Key (CMK) encryption for Data Source metadata", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Introduce Endpoint and EndpointConfig Arns in sagemaker:ListPipelineExecutionSteps API response", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "Marking use case description field of account details as deprecated.", + "type": "api-change" + }, + { + "category": "signing", + "description": "Adds internal support for the new 'auth' trait to allow a priority list of auth types for a service or operation.", + "type": "feature" + } +] \ No newline at end of file diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml index 3cc1f6390b..447d8c7e92 100644 --- a/.github/workflows/closed-issue-message.yml +++ b/.github/workflows/closed-issue-message.yml @@ -12,7 +12,7 @@ jobs: permissions: issues: write steps: - - uses: aws-actions/closed-issue-message@8b6324312193476beecf11f8e8539d73a3553bf4 + - uses: aws-actions/closed-issue-message@80edfc24bdf1283400eb04d20a8a605ae8bf7d48 
with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b5c4218572..045208e47f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,13 +23,13 @@ jobs: uses: "actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608" - name: "Run CodeQL init" - uses: "github/codeql-action/init@cdcdbb579706841c47f7063dda365e292e5cad7a" + uses: "github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa" with: config-file: "./.github/codeql.yml" languages: "python" - name: "Run CodeQL autobuild" - uses: "github/codeql-action/autobuild@cdcdbb579706841c47f7063dda365e292e5cad7a" + uses: "github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa" - name: "Run CodeQL analyze" - uses: "github/codeql-action/analyze@cdcdbb579706841c47f7063dda365e292e5cad7a" + uses: "github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa" diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 116b2399bb..8b6d251dfd 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -39,7 +39,7 @@ jobs: run: | python scripts/ci/run-tests --with-cov --with-xdist - name: Run codecov - uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed + uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 with: directory: tests diff --git a/.github/workflows/stale_issue.yml b/.github/workflows/stale_issue.yml index a71e51c4bb..07acdbe065 100644 --- a/.github/workflows/stale_issue.yml +++ b/.github/workflows/stale_issue.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest name: Stale issue job steps: - - uses: aws-actions/stale-issue-cleanup@389be0117d7661840b887276b5da1cc6ddf95c8a + - uses: aws-actions/stale-issue-cleanup@2017b87caa8e25014362d434a980d60fa758f3af with: issue-types: issues stale-issue-message: Greetings! 
It looks like this issue hasn’t been diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7551b15cf1..e8947045ca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,21 +19,13 @@ repos: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - - repo: 'https://github.com/asottile/pyupgrade' - rev: v3.15.0 - hooks: - - id: pyupgrade - args: - - '--py38-plus' - repo: 'https://github.com/PyCQA/isort' rev: 5.12.0 hooks: - id: isort - - repo: 'https://github.com/psf/black' - rev: 23.11.0 - hooks: - - id: black - - repo: 'https://github.com/pycqa/flake8' - rev: 6.1.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.4.8 hooks: - - id: flake8 + - id: ruff + args: [ --fix ] + - id: ruff-format diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fd9f0ebae4..497b6a1fa3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,645 @@ CHANGELOG ========= +1.35.0 +====== + +* api-change:``batch``: Improvements of integration between AWS Batch and EC2. +* api-change:``inspector2``: Update the correct format of key and values for resource tags +* api-change:``quicksight``: Amazon QuickSight launches Customer Managed Key (CMK) encryption for Data Source metadata +* api-change:``sagemaker``: Introduce Endpoint and EndpointConfig Arns in sagemaker:ListPipelineExecutionSteps API response +* api-change:``sesv2``: Marking use case description field of account details as deprecated. +* feature:signing: Adds internal support for the new 'auth' trait to allow a priority list of auth types for a service or operation. + + +1.34.162 +======== + +* api-change:``docdb``: This release adds Global Cluster Failover capability which enables you to change your global cluster's primary AWS region, the region that serves writes, during a regional outage. Performing a failover action preserves your Global Cluster setup. 
+* api-change:``ecs``: This release introduces a new ContainerDefinition configuration to support the customer-managed keys for ECS container restart feature. +* api-change:``iam``: Make the LastUsedDate field in the GetAccessKeyLastUsed response optional. This may break customers who only call the API for access keys with a valid LastUsedDate. This fixes a deserialization issue for access keys without a LastUsedDate, because the field was marked as required but could be null. +* api-change:``s3``: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. + + +1.34.161 +======== + +* api-change:``codebuild``: AWS CodeBuild now supports using Secrets Manager to store git credentials and using multiple source credentials in a single project. + + +1.34.160 +======== + +* api-change:``amplify``: Add a new field "cacheConfig" that enables users to configure the CDN cache settings for an App +* api-change:``appstream``: This release includes following new APIs: CreateThemeForStack, DescribeThemeForStack, UpdateThemeForStack, DeleteThemeForStack to support custom branding programmatically. +* api-change:``fis``: This release adds support for additional error information on experiment failure. It adds the error code, location, and account id on relevant failures to the GetExperiment and ListExperiment API responses. +* api-change:``glue``: Add AttributesToGet parameter support for Glue GetTables +* api-change:``neptune-graph``: Amazon Neptune Analytics provides a new option for customers to load data into a graph using the RDF (Resource Description Framework) NTRIPLES format. When loading NTRIPLES files, use the value `convertToIri` for the `blankNodeHandling` parameter. 
+ + +1.34.159 +======== + +* api-change:``compute-optimizer``: Doc only update for Compute Optimizer that fixes several customer-reported issues relating to ECS finding classifications +* api-change:``config``: Documentation update for the OrganizationConfigRuleName regex pattern. +* api-change:``ec2``: This release adds new capabilities to manage On-Demand Capacity Reservations including the ability to split your reservation, move capacity between reservations, and modify the instance eligibility of your reservation. +* api-change:``eks``: Added support for new AL2023 GPU AMIs to the supported AMITypes. +* api-change:``groundstation``: Updating documentation for OEMEphemeris to link to AWS Ground Station User Guide +* api-change:``medialive``: AWS Elemental MediaLive now supports editing the PID values for a Multiplex. +* api-change:``sagemaker``: Releasing large data support as part of CreateAutoMLJobV2 in SageMaker Autopilot and CreateDomain API for SageMaker Canvas. + + +1.34.158 +======== + +* api-change:``cognito-idp``: Fixed a description of AdvancedSecurityAdditionalFlows in Amazon Cognito user pool configuration. +* api-change:``connect``: This release supports adding RoutingCriteria via UpdateContactRoutingData public API. +* api-change:``ssm``: Systems Manager doc-only updates for August 2024. + + +1.34.157 +======== + +* api-change:``cognito-idp``: Added support for threat protection for custom authentication in Amazon Cognito user pools. +* api-change:``connect``: This release fixes a regression in number of access control tags that are allowed to be added to a security profile in Amazon Connect. You can now add up to four access control tags on a single security profile. +* api-change:``ec2``: Launch of private IPv6 addressing for VPCs and Subnets. VPC IPAM supports the planning and monitoring of private IPv6 usage. 
+* api-change:``glue``: This release adds support to retrieve the validation status when creating or updating Glue Data Catalog Views. Also added is support for BasicCatalogTarget partition keys. + + +1.34.156 +======== + +* api-change:``appintegrations``: Updated CreateDataIntegration and CreateDataIntegrationAssociation API to support bulk data export from Amazon Connect Customer Profiles to the customer S3 bucket. +* api-change:``glue``: Introducing AWS Glue Data Quality anomaly detection, a new functionality that uses ML-based solutions to detect data anomalies users have not explicitly defined rules for. + + +1.34.155 +======== + +* api-change:``bedrock-agent-runtime``: Introduce model invocation output traces for orchestration traces, which contain the model's raw response and usage. +* api-change:``cognito-idp``: Advanced security feature updates to include password history and log export for Cognito user pools. +* api-change:``cost-optimization-hub``: This release adds savings percentage support to the ListRecommendationSummaries API. +* api-change:``workspaces``: Added support for BYOL_GRAPHICS_G4DN_WSP IngestionProcess +* enhancement:AWSCRT: Update awscrt version to 0.21.2 + + +1.34.154 +======== + +* api-change:``datazone``: This releases Data Product feature. Data Products allow grouping data assets into cohesive, self-contained units for ease of publishing for data producers, and ease of finding and accessing for data consumers. +* api-change:``ecr``: Released two new APIs along with documentation updates. The GetAccountSetting API is used to view the current basic scan type version setting for your registry, while the PutAccountSetting API is used to update the basic scan type version for your registry. +* api-change:``kinesis-video-webrtc-storage``: Add JoinStorageSessionAsViewer API +* api-change:``pi``: Added a description for the Dimension db.sql.tokenized_id on the DimensionGroup data type page. 
+ + +1.34.153 +======== + +* api-change:``cloudwatch``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``kinesis``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``resiliencehub``: Customers are presented with the grouping recommendations and can determine if the recommendations are accurate and apply to their case. This feature simplifies onboarding by organizing resources into appropriate AppComponents. +* api-change:``route53``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``waf-regional``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. + + +1.34.152 +======== + +* api-change:``bedrock``: API and Documentation for Bedrock Model Copy feature. This feature lets you share and copy a custom model from one region to another or one account to another. +* api-change:``controlcatalog``: AWS Control Tower provides two new public APIs controlcatalog:ListControls and controlcatalog:GetControl under controlcatalog service namespace, which enable customers to programmatically retrieve control metadata of available controls. +* api-change:``controltower``: Updated Control Tower service documentation for controlcatalog control ARN support with existing Control Tower public APIs +* api-change:``iam``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``memorydb``: Doc only update for changes to deletion API. +* api-change:``rds``: This release adds support for specifying optional MinACU parameter in CreateDBShardGroup and ModifyDBShardGroup API. DBShardGroup response will contain MinACU if specified. +* api-change:``sagemaker``: This release adds support for Amazon EMR Serverless applications in SageMaker Studio for running data processing jobs. 
+* api-change:``ssm-quicksetup``: This release adds API support for the QuickSetup feature of AWS Systems Manager +* api-change:``support``: Doc only updates to CaseDetails + + +1.34.151 +======== + +* api-change:``appstream``: Added support for Red Hat Enterprise Linux 8 on Amazon AppStream 2.0 +* api-change:``autoscaling``: Increase the length limit for VPCZoneIdentifier from 2047 to 5000 +* api-change:``codepipeline``: AWS CodePipeline V2 type pipelines now support stage level conditions to enable development teams to safely release changes that meet quality and compliance requirements. +* api-change:``elasticache``: Doc only update for changes to deletion API. +* api-change:``elb``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``events``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``lexv2-models``: This release adds new capabilities to the AMAZON.QnAIntent: Custom prompting, Guardrails integration and ExactResponse support for Bedrock Knowledge Base. +* api-change:``logs``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``rolesanywhere``: IAM RolesAnywhere now supports custom role session name on the CreateSession. This release adds the acceptRoleSessionName option to a profile to control whether a role session name will be accepted in a session request with a given profile. +* api-change:``tnb``: This release adds Network Service Update, through which customers will be able to update their instantiated networks to a new network package. See the documentation for limitations. The release also enhances the Get network operation API to return parameter overrides used during the operation. +* api-change:``workspaces``: Removing multi-session as it isn't supported for pools + + +1.34.150 +======== + +* api-change:``elasticache``: Renaming full service name as it appears in developer documentation. 
+* api-change:``memorydb``: Renaming full service name as it appears in developer documentation. + + +1.34.149 +======== + +* api-change:``application-autoscaling``: Application Auto Scaling is now more responsive to the changes in demand of your SageMaker Inference endpoints. To get started, create or update a Target Tracking policy based on High Resolution CloudWatch metrics. +* api-change:``application-signals``: CloudWatch Application Signals now supports application logs correlation with traces and operational health metrics of applications running on EC2 instances. Users can view the most relevant telemetry to troubleshoot application health anomalies such as spikes in latency, errors, and availability. +* api-change:``bedrock-runtime``: Provides ServiceUnavailableException error message +* api-change:``codecommit``: CreateRepository API now throws OperationNotAllowedException when the account has been restricted from creating a repository. +* api-change:``datazone``: Introduces GetEnvironmentCredentials operation to SDK +* api-change:``ec2``: EC2 Fleet now supports using custom identifiers to reference Amazon Machine Images (AMI) in launch requests that are configured to choose from a diversified list of instance types. +* api-change:``ecr``: API and documentation updates for Amazon ECR, adding support for creating, updating, describing and deleting ECR Repository Creation Template. +* api-change:``eks``: This release adds support for EKS cluster to manage extended support. +* api-change:``elbv2``: This release adds support for sharing trust stores across accounts and organizations through integration with AWS Resource Access Manager. +* api-change:``network-firewall``: You can now log events that are related to TLS inspection, in addition to the existing alert and flow logging. 
+* api-change:``outposts``: Adding default vCPU information to GetOutpostSupportedInstanceTypes and GetOutpostInstanceTypes responses +* api-change:``stepfunctions``: This release adds support to customer managed KMS key encryption in AWS Step Functions. + + +1.34.148 +======== + +* api-change:``cleanrooms``: Three enhancements to the AWS Clean Rooms: Disallowed Output Columns, Flexible Result Receivers, SQL as a Seed +* api-change:``dynamodb``: DynamoDB doc only update for July +* api-change:``iotsitewise``: Adds support for creating SiteWise Edge gateways that run on a Siemens Industrial Edge Device. +* api-change:``mediapackagev2``: This release adds support for Irdeto DRM encryption in DASH manifests. +* api-change:``medical-imaging``: CopyImageSet API adds copying selected instances between image sets, and overriding inconsistent metadata with a force parameter. UpdateImageSetMetadata API enables reverting to prior versions; updates to Study, Series, and SOP Instance UIDs; and updates to private elements, with a force parameter. +* api-change:``pinpoint-sms-voice-v2``: Update for rebrand to AWS End User Messaging SMS and Voice. + + +1.34.147 +======== + +* api-change:``appsync``: Adding support for paginators in AppSync list APIs +* api-change:``cleanrooms``: This release adds AWS Entity Resolution integration to associate ID namespaces & ID mapping workflow resources as part of ID namespace association and ID mapping table in AWS Clean Rooms. It also introduces a new ID_MAPPING_TABLE analysis rule to manage the protection on ID mapping table. +* api-change:``cleanroomsml``: Adds SQL query as the source of seed audience for audience generation job. 
+* api-change:``connect``: Added PostContactSummary segment type on ListRealTimeContactAnalysisSegmentsV2 API +* api-change:``connect-contact-lens``: Added PostContactSummary segment type on ListRealTimeContactAnalysisSegments API +* api-change:``datazone``: This release removes the deprecated dataProductItem field from Search API output. +* api-change:``entityresolution``: Support First Party ID Mapping + + +1.34.146 +======== + +* api-change:``datazone``: This release adds 1/ support of register S3 locations of assets in AWS Lake Formation hybrid access mode for DefaultDataLake blueprint. 2/ support of CRUD operations for Asset Filters. +* api-change:``ivs``: Documentation update for IVS Low Latency API Reference. +* api-change:``mobile``: The mobile client has been removed following the deprecation of the service. +* api-change:``neptune-graph``: Amazon Neptune Analytics provides new options for customers to start with smaller graphs at a lower cost. CreateGraph, CreateGraphImportTask, UpdateGraph and StartImportTask APIs will now allow 32 and 64 for `provisioned-memory` +* api-change:``redshift-serverless``: Adds dualstack support for Redshift Serverless workgroup. + + +1.34.145 +======== + +* api-change:``acm-pca``: Fix broken waiters for the acm-pca client. Waiters broke in version 1.13.144 of the Boto3 SDK. +* api-change:``connect``: Amazon Connect expands search API coverage for additional resources. Search for hierarchy groups by name, ID, tag, or other criteria (new endpoint). Search for agent statuses by name, ID, tag, or other criteria (new endpoint). Search for users by their assigned proficiencies (enhanced endpoint) +* api-change:``ec2``: Amazon VPC IP Address Manager (IPAM) now supports Bring-Your-Own-IP (BYOIP) for IP addresses registered with any Internet Registry. This feature uses DNS TXT records to validate ownership of a public IP address range. 
+* api-change:``firehose``: This release 1) Add configurable buffering hints for Snowflake as destination. 2) Add ReadFromTimestamp for MSK As Source. Firehose will start reading data from MSK Cluster using offset associated with this timestamp. 3) Gated public beta release to add Apache Iceberg tables as destination. +* api-change:``ivschat``: Documentation update for IVS Chat API Reference. +* api-change:``medialive``: AWS Elemental MediaLive now supports the SRT protocol via the new SRT Caller input type. +* api-change:``rds``: Updates Amazon RDS documentation to specify an eventual consistency model for DescribePendingMaintenanceActions. +* api-change:``sagemaker``: SageMaker Training supports R5, T3 and R5D instances family. And SageMaker Processing supports G5 and R5D instances family. +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``taxsettings``: Set default endpoint for aws partition. Requests from all regions in aws partition will be forward to us-east-1 endpoint. +* api-change:``timestream-query``: Doc-only update for TimestreamQuery. Added guidance about the accepted valid value for the QueryPricingModel parameter. +* api-change:``workspaces-thin-client``: Documentation update for WorkSpaces Thin Client. +* bugfix:Waiter: Update waiters to handle expected boolean values when matching errors (`boto/botocore#3220 <https://github.com/boto/botocore/issues/3220>`__) + + +1.34.144 +======== + +* api-change:``acm-pca``: Minor refactoring of C2J model for AWS Private CA +* api-change:``arc-zonal-shift``: Adds the option to subscribe to get notifications when a zonal autoshift occurs in a region. +* api-change:``globalaccelerator``: This feature adds exceptions to the Customer API to avoid throwing Internal Service errors +* api-change:``pinpoint``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. 
+* api-change:``quicksight``: Vega ally control options and Support for Reviewed Answers in Topics + + +1.34.143 +======== + +* api-change:``batch``: This feature allows AWS Batch Jobs with EKS container orchestration type to be run as Multi-Node Parallel Jobs. +* api-change:``bedrock``: Add support for contextual grounding check for Guardrails for Amazon Bedrock. +* api-change:``bedrock-agent``: Introduces new data sources and chunking strategies for Knowledge bases, advanced parsing logic using FMs, session summary generation, and code interpretation (preview) for Claude V3 Sonnet and Haiku models. Also introduces Prompt Flows (preview) to link prompts, foundational models, and resources. +* api-change:``bedrock-agent-runtime``: Introduces query decomposition, enhanced Agents integration with Knowledge bases, session summary generation, and code interpretation (preview) for Claude V3 Sonnet and Haiku models. Also introduces Prompt Flows (preview) to link prompts, foundational models, and resources for end-to-end solutions. +* api-change:``bedrock-runtime``: Add support for contextual grounding check and ApplyGuardrail API for Guardrails for Amazon Bedrock. +* api-change:``ec2``: Add parameters to enable provisioning IPAM BYOIPv4 space at a Local Zone Network Border Group level +* api-change:``glue``: Add recipe step support for recipe node +* api-change:``groundstation``: Documentation update specifying OEM ephemeris units of measurement +* api-change:``license-manager-linux-subscriptions``: Add support for third party subscription providers, starting with RHEL subscriptions through Red Hat Subscription Manager (RHSM). Additionally, add support for tagging subscription provider resources, and detect when an instance has more than one Linux subscription and notify the customer. +* api-change:``mediaconnect``: AWS Elemental MediaConnect introduces the ability to disable outputs. 
Disabling an output allows you to keep the output attached to the flow, but stop streaming to the output destination. A disabled output does not incur data transfer costs. + + +1.34.142 +======== + +* api-change:``datazone``: This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes +* api-change:``fsx``: Adds support for FSx for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems. +* api-change:``opensearch``: This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down. +* api-change:``sagemaker``: This release 1/ enables optimization jobs that allows customers to perform Ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 3/ enables AdditionalModelDataSources for CreateModel action. + + +1.34.141 +======== + +* api-change:``codedeploy``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``devicefarm``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``dms``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``elasticbeanstalk``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``es``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``firehose``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``gamelift``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``qapps``: This is a general availability (GA) release of Amazon Q Apps, a capability of Amazon Q Business. Q Apps leverages data sources your company has provided to enable users to build, share, and customize apps within your organization. 
+* api-change:``route53resolver``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``ses``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. + + +1.34.140 +======== + +* api-change:``acm``: Documentation updates, including fixes for xml formatting, broken links, and ListCertificates description. +* api-change:``ecr``: This release for Amazon ECR makes change to bring the SDK into sync with the API. +* api-change:``payment-cryptography-data``: Added further restrictions on logging of potentially sensitive inputs and outputs. +* api-change:``qbusiness``: Add personalization to Q Applications. Customers can enable or disable personalization when creating or updating a Q application with the personalization configuration. + + +1.34.139 +======== + +* api-change:``application-autoscaling``: Doc only update for Application Auto Scaling that fixes resource name. +* api-change:``directconnect``: This update includes documentation for support of new native 400 GBps ports for Direct Connect. +* api-change:``organizations``: Added a new reason under ConstraintViolationException in RegisterDelegatedAdministrator API to prevent registering suspended accounts as delegated administrator of a service. +* api-change:``rekognition``: This release adds support for tagging projects and datasets with the CreateProject and CreateDataset APIs. +* api-change:``workspaces``: Fix create workspace bundle RootStorage/UserStorage to accept non null values + + +1.34.138 +======== + +* api-change:``ec2``: Documentation updates for Elastic Compute Cloud (EC2). +* api-change:``fms``: Increases Customer API's ManagedServiceData length +* api-change:``s3``: Added response overrides to Head Object requests. + + +1.34.137 +======== + +* api-change:``apigateway``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``cognito-identity``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. 
+* api-change:``connect``: Authentication profiles are Amazon Connect resources (in gated preview) that allow you to configure authentication settings for users in your contact center. This release adds support for new ListAuthenticationProfiles, DescribeAuthenticationProfile and UpdateAuthenticationProfile APIs. +* api-change:``docdb``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``eks``: Updates EKS managed node groups to support EC2 Capacity Blocks for ML +* api-change:``payment-cryptography``: Added further restrictions on logging of potentially sensitive inputs and outputs. +* api-change:``payment-cryptography-data``: Adding support for dynamic keys for encrypt, decrypt, re-encrypt and translate pin functions. With this change, customers can use one-time TR-31 keys directly in dataplane operations without the need to first import them into the service. +* api-change:``stepfunctions``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``swf``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``wafv2``: JSON body inspection: Update documentation to clarify that JSON parsing doesn't include full validation. + + +1.34.136 +======== + +* api-change:``acm-pca``: Added CCPC_LEVEL_1_OR_HIGHER KeyStorageSecurityStandard and SM2 KeyAlgorithm and SM3WITHSM2 SigningAlgorithm for China regions. +* api-change:``cloudhsmv2``: Added 3 new APIs to support backup sharing: GetResourcePolicy, PutResourcePolicy, and DeleteResourcePolicy. Added BackupArn to the output of the DescribeBackups API. Added support for BackupArn in the CreateCluster API. +* api-change:``connect``: This release supports showing PreferredAgentRouting step via DescribeContact API. +* api-change:``emr``: This release provides the support for new allocation strategies i.e. 
CAPACITY_OPTIMIZED_PRIORITIZED for Spot and PRIORITIZED for On-Demand by taking input of priority value for each instance type for instance fleet clusters. +* api-change:``glue``: Added AttributesToGet parameter to Glue GetDatabases, allowing caller to limit output to include only the database name. +* api-change:``kinesisanalyticsv2``: Support for Flink 1.19 in Managed Service for Apache Flink +* api-change:``opensearch``: This release removes support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains. +* api-change:``pi``: Noting that the filter db.sql.db_id isn't available for RDS for SQL Server DB instances. +* api-change:``workspaces``: Added support for Red Hat Enterprise Linux 8 on Amazon WorkSpaces Personal. + + +1.34.135 +======== + +* api-change:``application-autoscaling``: Amazon WorkSpaces customers can now use Application Auto Scaling to automatically scale the number of virtual desktops in a WorkSpaces pool. +* api-change:``chime-sdk-media-pipelines``: Added Amazon Transcribe multi language identification to Chime SDK call analytics. Enabling customers sending single stream audio to generate call recordings using Chime SDK call analytics +* api-change:``cloudfront``: Doc only update for CloudFront that fixes customer-reported issue +* api-change:``datazone``: This release supports the data lineage feature of business data catalog in Amazon DataZone. +* api-change:``elasticache``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``mq``: This release makes the EngineVersion field optional for both broker and configuration and uses the latest available version by default. The AutoMinorVersionUpgrade field is also now optional for broker creation and defaults to 'true'. +* api-change:``qconnect``: Adds CreateContentAssociation, ListContentAssociations, GetContentAssociation, and DeleteContentAssociation APIs. 
+* api-change:``quicksight``: Adding support for Repeating Sections, Nested Filters +* api-change:``rds``: Updates Amazon RDS documentation for TAZ export to S3. +* api-change:``sagemaker``: Add capability for Admins to customize Studio experience for the user by showing or hiding Apps and MLTools. +* api-change:``workspaces``: Added support for WorkSpaces Pools. + + +1.34.134 +======== + +* api-change:``controltower``: Added ListLandingZoneOperations API. +* api-change:``eks``: Added support for disabling unmanaged addons during cluster creation. +* api-change:``ivs-realtime``: IVS Real-Time now offers customers the ability to upload public keys for customer vended participant tokens. +* api-change:``kinesisanalyticsv2``: This release adds support for new ListApplicationOperations and DescribeApplicationOperation APIs. It adds a new configuration to enable system rollbacks, adds field ApplicationVersionCreateTimestamp for clarity and improves support for pagination for APIs. +* api-change:``opensearch``: This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down. + + +1.34.133 +======== + +* api-change:``autoscaling``: Doc only update for Auto Scaling's TargetTrackingMetricDataQuery +* api-change:``ec2``: This release is for the launch of the new u7ib-12tb.224xlarge, R8g, c7gn.metal and mac2-m1ultra.metal instance types +* api-change:``networkmanager``: This is model changes & documentation update for the Asynchronous Error Reporting feature for AWS Cloud WAN. This feature allows customers to view errors that occur while their resources are being provisioned, enabling customers to fix their resources without needing external support. +* api-change:``workspaces-thin-client``: This release adds the deviceCreationTags field to CreateEnvironment API input, UpdateEnvironment API input and GetEnvironment API output. 
+ + +1.34.132 +======== + +* api-change:``bedrock-runtime``: Increases Converse API's document name length +* api-change:``customer-profiles``: This release includes changes to ProfileObjectType APIs, adds functionality to set and get capacity for profile object types. +* api-change:``ec2``: Fix EC2 multi-protocol info in models. +* api-change:``qbusiness``: Allow enable/disable Q Apps when creating/updating a Q application; Return the Q Apps enablement information when getting a Q application. +* api-change:``ssm``: Add sensitive trait to SSM IPAddress property for CloudTrail redaction +* api-change:``workspaces-web``: Added ability to enable DeepLinking functionality on a Portal via UserSettings as well as added support for IdentityProvider resource tagging. + + +1.34.131 +======== + +* api-change:``bedrock-runtime``: This release adds document support to Converse and ConverseStream APIs +* api-change:``codeartifact``: Add support for the Cargo package format. +* api-change:``compute-optimizer``: This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for Amazon RDS MySQL and RDS PostgreSQL. +* api-change:``cost-optimization-hub``: This release enables AWS Cost Optimization Hub to show cost optimization recommendations for Amazon RDS MySQL and RDS PostgreSQL. +* api-change:``dynamodb``: Doc-only update for DynamoDB. Fixed Important note in 6 Global table APIs - CreateGlobalTable, DescribeGlobalTable, DescribeGlobalTableSettings, ListGlobalTables, UpdateGlobalTable, and UpdateGlobalTableSettings. +* api-change:``glue``: Fix Glue paginators for Jobs, JobRuns, Triggers, Blueprints and Workflows. +* api-change:``ivs-realtime``: IVS Real-Time now offers customers the ability to record individual stage participants to S3. 
+* api-change:``sagemaker``: Adds support for model references in Hub service, and adds support for cross-account access of Hubs +* api-change:``securityhub``: Documentation updates for Security Hub + + +1.34.130 +======== + +* api-change:``artifact``: This release adds an acceptanceType field to the ReportSummary structure (used in the ListReports API response). +* api-change:``athena``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``cur``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``directconnect``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``elastictranscoder``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``opensearch``: This release enables customers to use JSON Web Tokens (JWT) for authentication on their Amazon OpenSearch Service domains. + + +1.34.129 +======== + +* api-change:``bedrock-runtime``: This release adds support for using Guardrails with the Converse and ConverseStream APIs. +* api-change:``cloudtrail``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``config``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``eks``: This release adds support to surface async fargate customer errors from async path to customer through describe-fargate-profile API response. +* api-change:``lightsail``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``polly``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``rekognition``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``sagemaker``: Launched a new feature in SageMaker to provide managed MLflow Tracking Servers for customers to track ML experiments. This release also adds a new capability of attaching additional storage to SageMaker HyperPod cluster instances. 
+* api-change:``shield``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``snowball``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. + + +1.34.128 +======== + +* api-change:``acm-pca``: Doc-only update that adds name constraints as an allowed extension for ImportCertificateAuthorityCertificate. +* api-change:``batch``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``codebuild``: AWS CodeBuild now supports global and organization GitHub webhooks +* api-change:``cognito-idp``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``ds``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``efs``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``glue``: This release introduces a new feature, Usage profiles. Usage profiles allow the AWS Glue admin to create different profiles for various classes of users within the account, enforcing limits and defaults for jobs and sessions. +* api-change:``mediaconvert``: This release includes support for creating I-frame only video segments for DASH trick play. +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``waf``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. + + +1.34.127 +======== + +* api-change:``datazone``: This release introduces a new default service blueprint for custom environment creation. +* api-change:``ec2``: Documentation updates for Amazon EC2. +* api-change:``macie2``: This release adds support for managing the status of automated sensitive data discovery for individual accounts in an organization, and determining whether individual S3 buckets are included in the scope of the analyses. 
+* api-change:``mediaconvert``: This release adds the ability to search for historical job records within the management console using a search box and/or via the SDK/CLI with partial string matching search on input file name. +* api-change:``route53domains``: Add v2 smoke tests and smithy smokeTests trait for SDK testing. + + +1.34.126 +======== + +* api-change:``cloudhsmv2``: Added support for hsm type hsm2m.medium. Added supported for creating a cluster in FIPS or NON_FIPS mode. +* api-change:``glue``: This release adds support for configuration of evaluation method for composite rules in Glue Data Quality rulesets. +* api-change:``iotwireless``: Add RoamingDeviceSNR and RoamingDeviceRSSI to Customer Metrics. +* api-change:``kms``: This feature allows customers to use their keys stored in KMS to derive a shared secret which can then be used to establish a secured channel for communication, provide proof of possession, or establish trust with other parties. +* api-change:``mediapackagev2``: This release adds support for CMAF ingest (DASH-IF live media ingest protocol interface 1) + + +1.34.125 +======== + +* api-change:``apptest``: AWS Mainframe Modernization Application Testing is an AWS Mainframe Modernization service feature that automates functional equivalence testing for mainframe application modernization and migration to AWS, and regression testing. +* api-change:``backupstorage``: The backupstorage client has been removed following the deprecation of the service. +* api-change:``ec2``: Tagging support for Traffic Mirroring FilterRule resource +* api-change:``osis``: SDK changes for self-managed vpc endpoint to OpenSearch ingestion pipelines. +* api-change:``redshift``: Updates to remove DC1 and DS2 node types. +* api-change:``secretsmanager``: Introducing RotationToken parameter for PutSecretValue API +* api-change:``securitylake``: This release updates request validation regex to account for non-commercial aws partitions. 
+* api-change:``sesv2``: This release adds support for Amazon EventBridge as an email sending events destination. + + +1.34.124 +======== + +* api-change:``accessanalyzer``: IAM Access Analyzer now provides policy recommendations to help resolve unused permissions for IAM roles and users. Additionally, IAM Access Analyzer now extends its custom policy checks to detect when IAM policies grant public access or access to critical resources ahead of deployments. +* api-change:``guardduty``: Added API support for GuardDuty Malware Protection for S3. +* api-change:``networkmanager``: This is model changes & documentation update for Service Insertion feature for AWS Cloud WAN. This feature allows insertion of AWS/3rd party security services on Cloud WAN. This allows to steer inter/intra segment traffic via security appliances and provide visibility to the route updates. +* api-change:``pca-connector-scep``: Connector for SCEP allows you to use a managed, cloud CA to enroll mobile devices and networking gear. SCEP is a widely-adopted protocol used by mobile device management (MDM) solutions for enrolling mobile devices. With the connector, you can use AWS Private CA with popular MDM solutions. +* api-change:``sagemaker``: Introduced Scope and AuthenticationRequestExtraParams to SageMaker Workforce OIDC configuration; this allows customers to modify these options for their private Workforce IdP integration. Model Registry Cross-account model package groups are discoverable. + + +1.34.123 +======== + +* api-change:``application-signals``: This is the initial SDK release for Amazon CloudWatch Application Signals. Amazon CloudWatch Application Signals provides curated application performance monitoring for developers to monitor and troubleshoot application health using pre-built dashboards and Service Level Objectives. +* api-change:``ecs``: This release introduces a new cluster configuration to support the customer-managed keys for ECS managed storage encryption. 
+* api-change:``imagebuilder``: This release updates the regex pattern for Image Builder ARNs. + + +1.34.122 +======== + +* api-change:``auditmanager``: New feature: common controls. When creating custom controls, you can now use pre-grouped AWS data sources based on common compliance themes. Also, the awsServices parameter is deprecated because we now manage services in scope for you. If used, the input is ignored and an empty list is returned. +* api-change:``b2bi``: Added exceptions to B2Bi List operations and ConflictException to B2Bi StartTransformerJob operation. Also made capabilities field explicitly required when creating a Partnership. +* api-change:``codepipeline``: CodePipeline now supports overriding S3 Source Object Key during StartPipelineExecution, as part of Source Overrides. +* api-change:``sagemaker``: This release introduces a new optional parameter: InferenceAmiVersion, in ProductionVariant. +* api-change:``verifiedpermissions``: This release adds OpenIdConnect (OIDC) configuration support for IdentitySources, allowing for external IDPs to be used in authorization requests. + + +1.34.121 +======== + +* api-change:``account``: This release adds 3 new APIs (AcceptPrimaryEmailUpdate, GetPrimaryEmail, and StartPrimaryEmailUpdate) used to centrally manage the root user email address of member accounts within an AWS organization. +* api-change:``alexaforbusiness``: The alexaforbusiness client has been removed following the deprecation of the service. +* api-change:``firehose``: Adds integration with Secrets Manager for Redshift, Splunk, HttpEndpoint, and Snowflake destinations +* api-change:``fsx``: This release adds support to increase metadata performance on FSx for Lustre file systems beyond the default level provisioned when a file system is created. This can be done by specifying MetadataConfiguration during the creation of Persistent_2 file systems or by updating it on demand. 
+* api-change:``glue``: This release adds support for creating and updating Glue Data Catalog Views. +* api-change:``honeycode``: The honeycode client has been removed following the deprecation of the service. +* api-change:``iotwireless``: Adds support for wireless device to be in Conflict FUOTA Device Status due to a FUOTA Task, so it couldn't be attached to a new one. +* api-change:``location``: Added two new APIs, VerifyDevicePosition and ForecastGeofenceEvents. Added support for putting larger geofences up to 100,000 vertices with Geobuf fields. +* api-change:``sns``: Doc-only update for SNS. These changes include customer-reported issues and TXC3 updates. +* api-change:``sqs``: Doc only updates for SQS. These updates include customer-reported issues and TCX3 modifications. +* api-change:``storagegateway``: Adds SoftwareUpdatePreferences to DescribeMaintenanceStartTime and UpdateMaintenanceStartTime, a structure which contains AutomaticUpdatePolicy. +* enhancement:AWSCRT: Update awscrt version to 0.20.11 + + +1.34.120 +======== + +* api-change:``globalaccelerator``: This release contains a new optional ip-addresses input field for the update accelerator and update custom routing accelerator apis. This input enables consumers to replace IPv4 addresses on existing accelerators with addresses provided in the input. +* api-change:``glue``: AWS Glue now supports native SaaS connectivity: Salesforce connector available now +* api-change:``s3``: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. + + +1.34.119 +======== + +* api-change:``ec2``: U7i instances with up to 32 TiB of DDR5 memory and 896 vCPUs are now available. C7i-flex instances are launched and are lower-priced variants of the Amazon EC2 C7i instances that offer a baseline level of CPU performance with the ability to scale up to the full compute performance 95% of the time. 
+* api-change:``pipes``: This release adds Timestream for LiveAnalytics as a supported target in EventBridge Pipes +* api-change:``sagemaker``: Extend DescribeClusterNode response with private DNS hostname and IP address, and placement information about availability zone and availability zone ID. +* api-change:``taxsettings``: Initial release of AWS Tax Settings API + + +1.34.118 +======== + +* api-change:``amplify``: This doc-only update identifies fields that are specific to Gen 1 and Gen 2 applications. +* api-change:``batch``: This release adds support for the AWS Batch GetJobQueueSnapshot API operation. +* api-change:``eks``: Adds support for EKS add-ons pod identity associations integration +* api-change:``iottwinmaker``: Support RESET_VALUE UpdateType for PropertyUpdates to reset property value to default or null + + +1.34.117 +======== + +* api-change:``codebuild``: AWS CodeBuild now supports Self-hosted GitHub Actions runners for Github Enterprise +* api-change:``codeguru-security``: This release includes minor model updates and documentation updates. +* api-change:``elasticache``: Update to attributes of TestFailover and minor revisions. +* api-change:``launch-wizard``: This release adds support for describing workload deployment specifications, deploying additional workload types, and managing tags for Launch Wizard resources with API operations. + + +1.34.116 +======== + +* api-change:``acm``: add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``bedrock-agent``: With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2. +* api-change:``bedrock-runtime``: This release adds Converse and ConverseStream APIs to Bedrock Runtime +* api-change:``cloudtrail``: CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. 
For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour. +* api-change:``connect``: Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API +* api-change:``emr-serverless``: The release adds support for spark structured streaming. +* api-change:``rds``: Updates Amazon RDS documentation for Aurora Postgres DBname. +* api-change:``sagemaker``: Adds Model Card information as a new component to Model Package. Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm. + + +1.34.115 +======== + +* api-change:``athena``: Throwing validation errors on CreateNotebook with Name containing `/`,`:`,`\` +* api-change:``codebuild``: AWS CodeBuild now supports manually creating GitHub webhooks +* api-change:``connect``: This release includes changes to DescribeContact API's response by including ConnectedToSystemTimestamp, RoutingCriteria, Customer, Campaign, AnsweringMachineDetectionStatus, CustomerVoiceActivity, QualityMetrics, DisconnectDetails, and SegmentAttributes information from a contact in Amazon Connect. +* api-change:``glue``: Add optional field JobMode to CreateJob and UpdateJob APIs. +* api-change:``securityhub``: Add ROOT type for TargetType model + + +1.34.114 +======== + +* api-change:``dynamodb``: Doc-only update for DynamoDB. Specified the IAM actions needed to authorize a user to create a table with a resource-based policy. +* api-change:``ec2``: Providing support to accept BgpAsnExtended attribute +* api-change:``kafka``: Adds ControllerNodeInfo in ListNodes response to support Raft mode for MSK +* api-change:``swf``: This release adds new APIs for deleting activity type and workflow type resources. + + +1.34.113 +======== + +* api-change:``dynamodb``: Documentation only updates for DynamoDB. 
+* api-change:``iotfleetwise``: AWS IoT FleetWise now supports listing vehicles with attributes filter, ListVehicles API is updated to support additional attributes filter. +* api-change:``managedblockchain``: This is a minor documentation update to address the impact of the shut down of the Goerli and Polygon networks. + + +1.34.112 +======== + +* api-change:``emr-serverless``: This release adds the capability to run interactive workloads using Apache Livy Endpoint. +* api-change:``opsworks``: Documentation-only update for OpsWorks Stacks. + + +1.34.111 +======== + +* api-change:``chatbot``: This change adds support for tagging Chatbot configurations. +* api-change:``cloudformation``: Added DeletionMode FORCE_DELETE_STACK for deleting a stack that is stuck in DELETE_FAILED state due to resource deletion failure. +* api-change:``kms``: This release includes feature to import customer's asymmetric (RSA, ECC and SM2) and HMAC keys into KMS in China. +* api-change:``opensearch``: This release adds support for enabling or disabling a data source configured as part of Zero-ETL integration with Amazon S3, by setting its status. +* api-change:``wafv2``: You can now use Security Lake to collect web ACL traffic data. + + +1.34.110 +======== + +* api-change:``cloudfront``: Model update; no change to SDK functionality. +* api-change:``glue``: Add Maintenance window to CreateJob and UpdateJob APIs and JobRun response. Add a new Job Run State for EXPIRED. +* api-change:``lightsail``: This release adds support for Amazon Lightsail instances to switch between dual-stack or IPv4 only and IPv6-only public IP address types. +* api-change:``mailmanager``: This release includes a new Amazon SES feature called Mail Manager, which is a set of email gateway capabilities designed to help customers strengthen their organization's email infrastructure, simplify email workflow management, and streamline email compliance control. 
+* api-change:``pi``: Performance Insights added a new input parameter called AuthorizedActions to support the fine-grained access feature. Performance Insights also restricted the acceptable input characters. +* api-change:``rds``: Updates Amazon RDS documentation for Db2 license through AWS Marketplace. +* api-change:``storagegateway``: Added new SMBSecurityStrategy enum named MandatoryEncryptionNoAes128, new mode enforces encryption and disables AES 128-bit algorithms. + + +1.34.109 +======== + +* api-change:``bedrock-agent``: This release adds support for using Guardrails with Bedrock Agents. +* api-change:``bedrock-agent-runtime``: This release adds support for using Guardrails with Bedrock Agents. +* api-change:``controltower``: Added ListControlOperations API and filtering support for ListEnabledControls API. Updates also includes added metadata for enabled controls and control operations. +* api-change:``osis``: Add support for creating an OpenSearch Ingestion pipeline that is attached to a provided VPC. Add information about the destinations of an OpenSearch Ingestion pipeline to the GetPipeline and ListPipelines APIs. +* api-change:``rds``: This release adds support for EngineLifecycleSupport on DBInstances, DBClusters, and GlobalClusters. +* api-change:``secretsmanager``: add v2 smoke tests and smithy smokeTests trait for SDK testing + + +1.34.108 +======== + +* api-change:``application-autoscaling``: add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``codebuild``: Aws CodeBuild now supports 36 hours build timeout +* api-change:``elbv2``: This release adds dualstack-without-public-ipv4 IP address type for ALB. +* api-change:``lakeformation``: Introduces a new API, GetDataLakePrincipal, that returns the identity of the invoking principal +* api-change:``transfer``: Enable use of CloudFormation traits in Smithy model to improve generated CloudFormation schema from the Smithy API model. 
+ + +1.34.107 +======== + +* api-change:``acm-pca``: This release adds support for waiters to fail on AccessDeniedException when having insufficient permissions +* api-change:``connect``: Adding Contact Flow metrics to the GetMetricDataV2 API +* api-change:``kafka``: AWS MSK support for Broker Removal. +* api-change:``mwaa``: Amazon MWAA now supports Airflow web server auto scaling to automatically handle increased demand from REST APIs, Command Line Interface (CLI), or more Airflow User Interface (UI) users. Customers can specify maximum and minimum web server instances during environment creation and update workflow. +* api-change:``quicksight``: This release adds DescribeKeyRegistration and UpdateKeyRegistration APIs to manage QuickSight Customer Managed Keys (CMK). +* api-change:``sagemaker``: Introduced WorkerAccessConfiguration to SageMaker Workteam. This allows customers to configure resource access for workers in a workteam. +* api-change:``secretsmanager``: Documentation updates for AWS Secrets Manager +* bugfix:retries: Fix backoff calculation for truncated binary exponential backoff (`#3178 `__) + + +1.34.106 +======== + +* api-change:``bedrock-agent-runtime``: Updating Bedrock Knowledge Base Metadata & Filters feature with two new filters listContains and stringContains +* api-change:``codebuild``: CodeBuild Reserved Capacity VPC Support +* api-change:``datasync``: Task executions now display a CANCELLING status when an execution is in the process of being cancelled. +* api-change:``grafana``: This release adds new ServiceAccount and ServiceAccountToken APIs. +* api-change:``medical-imaging``: Added support for importing medical imaging data from Amazon S3 buckets across accounts and regions. 
+* api-change:``securityhub``: Documentation-only update for AWS Security Hub + + 1.34.105 ======== diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index c4de13054a..718b29489a 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -72,10 +72,11 @@ Reporting An Issue/Feature Codestyle --------- -This project uses flake8 to enforce codstyle requirements. We've codified this -process using a tool called `pre-commit `__. pre-commit -allows us to specify a config file with all tools required for code linting, -and surfaces either a git commit hook, or single command, for enforcing these. +This project uses `ruff `__ to enforce +codestyle requirements. We've codified this process using a tool called +`pre-commit `__. pre-commit allows us to specify a +config file with all tools required for code linting, and surfaces either a +git commit hook, or single command, for enforcing these. To validate your PR prior to publishing, you can use the following `installation guide `__ to setup pre-commit. @@ -88,11 +89,7 @@ to automatically perform the codestyle validation: $ pre-commit run This will automatically perform simple updates (such as white space clean up) -and provide a list of any failing flake8 checks. After these are addressed, +and provide a list of any failing checks. After these are addressed, you can commit the changes prior to publishing the PR. These checks are also included in our CI setup under the "Lint" workflow which will provide output on Github for anything missed locally. - -See the `flake8` section of the -`setup.cfg `__ for the -currently enforced rules. 
diff --git a/botocore/__init__.py b/botocore/__init__.py index 75af2bec1c..8283fd1d74 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.105' +__version__ = '1.35.0' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index dbbcbe8a99..741ca77886 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -16,6 +16,7 @@ considered internal, and *not* a public API. """ + import copy import logging import socket @@ -267,6 +268,9 @@ def compute_client_args( client_config.disable_request_compression ), client_context_params=client_config.client_context_params, + sigv4a_signing_region_set=( + client_config.sigv4a_signing_region_set + ), ) self._compute_retry_config(config_kwargs) self._compute_connect_timeout(config_kwargs) @@ -460,7 +464,7 @@ def _get_sts_regional_endpoints_config(self): def _set_global_sts_endpoint(self, endpoint_config, is_secure): scheme = 'https' if is_secure else 'http' - endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme + endpoint_config['endpoint_url'] = f'{scheme}://sts.amazonaws.com' endpoint_config['signing_region'] = 'us-east-1' def _resolve_endpoint( diff --git a/botocore/auth.py b/botocore/auth.py index 8389c1579c..66e605a665 100644 --- a/botocore/auth.py +++ b/botocore/auth.py @@ -35,7 +35,12 @@ urlsplit, urlunsplit, ) -from botocore.exceptions import NoAuthTokenError, NoCredentialsError +from botocore.exceptions import ( + NoAuthTokenError, + NoCredentialsError, + UnknownSignatureVersionError, + UnsupportedSignatureVersionError, +) from botocore.utils import ( is_valid_ipv6_endpoint_url, normalize_url_path, @@ -432,12 +437,12 @@ def add_auth(self, request): self._inject_signature_to_request(request, signature) def _inject_signature_to_request(self, request, signature): - auth_str = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)] + auth_str = [f'AWS4-HMAC-SHA256 Credential={self.scope(request)}'] headers_to_sign = 
self.headers_to_sign(request) auth_str.append( f"SignedHeaders={self.signed_headers(headers_to_sign)}" ) - auth_str.append('Signature=%s' % signature) + auth_str.append(f'Signature={signature}') request.headers['Authorization'] = ', '.join(auth_str) return request @@ -685,7 +690,7 @@ def _inject_signature_to_request(self, request, signature): # Rather than calculating an "Authorization" header, for the query # param quth, we just append an 'X-Amz-Signature' param to the end # of the query string. - request.url += '&X-Amz-Signature=%s' % signature + request.url += f'&X-Amz-Signature={signature}' def _normalize_url_path(self, path): # For S3, we do not normalize the path. @@ -777,7 +782,7 @@ def _inject_signature_to_request(self, request, signature): # Rather than calculating an "Authorization" header, for the query # param quth, we just append an 'X-Amz-Signature' param to the end # of the query string. - request.url += '&X-Amz-Signature=%s' % signature + request.url += f'&X-Amz-Signature={signature}' class S3SigV4QueryAuth(SigV4QueryAuth): @@ -990,7 +995,7 @@ def get_signature( string_to_sign = self.canonical_string( method, split, headers, auth_path=auth_path ) - logger.debug('StringToSign:\n%s', string_to_sign) + logger.debug(f'StringToSign:\n{string_to_sign}') return self.sign_string(string_to_sign) def add_auth(self, request): @@ -998,7 +1003,7 @@ def add_auth(self, request): raise NoCredentialsError logger.debug("Calculating signature using hmacv1 auth.") split = urlsplit(request.url) - logger.debug('HTTP request method: %s', request.method) + logger.debug(f'HTTP request method: {request.method}') signature = self.get_signature( request.method, split, request.headers, auth_path=request.auth_path ) @@ -1132,6 +1137,19 @@ def add_auth(self, request): request.headers['Authorization'] = auth_header +def resolve_auth_type(auth_trait): + for auth_type in auth_trait: + if auth_type == 'smithy.api#noAuth': + return AUTH_TYPE_TO_SIGNATURE_VERSION[auth_type] + elif 
auth_type in AUTH_TYPE_TO_SIGNATURE_VERSION: + signature_version = AUTH_TYPE_TO_SIGNATURE_VERSION[auth_type] + if signature_version in AUTH_TYPE_MAPS: + return signature_version + else: + raise UnknownSignatureVersionError(signature_version=auth_type) + raise UnsupportedSignatureVersionError(signature_version=auth_trait) + + AUTH_TYPE_MAPS = { 'v2': SigV2Auth, 'v3': SigV3Auth, @@ -1160,3 +1178,10 @@ def add_auth(self, request): 's3v4-query': S3SigV4QueryAuth, } ) + +AUTH_TYPE_TO_SIGNATURE_VERSION = { + 'aws.auth#sigv4': 'v4', + 'aws.auth#sigv4a': 'v4a', + 'smithy.api#httpBearerAuth': 'bearer', + 'smithy.api#noAuth': 'none', +} diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py index 9123e65c9d..49b4eee0d9 100644 --- a/botocore/awsrequest.py +++ b/botocore/awsrequest.py @@ -280,9 +280,9 @@ def prepare_request_dict( percent_encode_sequence = botocore.utils.percent_encode_sequence encoded_query_string = percent_encode_sequence(r['query_string']) if '?' not in url: - url += '?%s' % encoded_query_string + url += f'?{encoded_query_string}' else: - url += '&%s' % encoded_query_string + url += f'&{encoded_query_string}' r['url'] = url r['context'] = context if context is None: diff --git a/botocore/client.py b/botocore/client.py index 1e36232834..ab1be75365 100644 --- a/botocore/client.py +++ b/botocore/client.py @@ -14,7 +14,7 @@ from botocore import waiter, xform_name from botocore.args import ClientArgsCreator -from botocore.auth import AUTH_TYPE_MAPS +from botocore.auth import AUTH_TYPE_MAPS, resolve_auth_type from botocore.awsrequest import prepare_request_dict from botocore.compress import maybe_compress_request from botocore.config import Config @@ -148,15 +148,19 @@ def create_client( region_name, client_config = self._normalize_fips_region( region_name, client_config ) + if auth := service_model.metadata.get('auth'): + service_signature_version = resolve_auth_type(auth) + else: + service_signature_version = service_model.metadata.get( + 
'signatureVersion' + ) endpoint_bridge = ClientEndpointBridge( self._endpoint_resolver, scoped_config, client_config, service_signing_name=service_model.metadata.get('signingName'), config_store=self._config_store, - service_signature_version=service_model.metadata.get( - 'signatureVersion' - ), + service_signature_version=service_signature_version, ) client_args = self._get_client_args( service_model, @@ -199,7 +203,7 @@ def _create_client_class(self, service_name, service_model): bases = [BaseClient] service_id = service_model.service_id.hyphenize() self._event_emitter.emit( - 'creating-client-class.%s' % service_id, + f'creating-client-class.{service_id}', class_attributes=class_attributes, base_classes=bases, ) @@ -223,10 +227,10 @@ def _normalize_fips_region(self, region_name, client_config): else: client_config = config_use_fips_endpoint logger.warning( - 'transforming region from %s to %s and setting ' + f'transforming region from {region_name} to ' + f'{normalized_region_name} and setting ' 'use_fips_endpoint to true. client should not ' 'be configured with a fips psuedo region.' 
- % (region_name, normalized_region_name) ) region_name = normalized_region_name return region_name, client_config @@ -289,7 +293,7 @@ def _register_legacy_retries(self, client): handler = self._retry_handler_factory.create_retry_handler( retry_config, endpoint_prefix ) - unique_id = 'retry-config-%s' % service_event_name + unique_id = f'retry-config-{service_event_name}' client.meta.events.register( f"needs-retry.{service_event_name}", handler, unique_id=unique_id ) @@ -487,7 +491,7 @@ def _default_s3_presign_to_sigv2(self, signature_version, **kwargs): return if signature_version.startswith('v4-s3express'): - return f'{signature_version}' + return signature_version for suffix in ['-query', '-presign-post']: if signature_version.endswith(suffix): @@ -573,7 +577,7 @@ def _api_call(self, *args, **kwargs): method_name=operation_name, event_emitter=self._event_emitter, method_description=operation_model.documentation, - example_prefix='response = client.%s' % py_operation_name, + example_prefix=f'response = client.{py_operation_name}', include_signature=False, ) _api_call.__doc__ = docstring @@ -953,8 +957,10 @@ def _make_api_call(self, operation_name, api_params): 'client_region': self.meta.region_name, 'client_config': self.meta.config, 'has_streaming_input': operation_model.has_streaming_input, - 'auth_type': operation_model.auth_type, + 'auth_type': operation_model.resolved_auth_type, + 'unsigned_payload': operation_model.unsigned_payload, } + api_params = self._emit_api_params( api_params=api_params, operation_model=operation_model, @@ -982,9 +988,7 @@ def _make_api_call(self, operation_name, api_params): service_id = self._service_model.service_id.hyphenize() handler, event_response = self.meta.events.emit_until_response( - 'before-call.{service_id}.{operation_name}'.format( - service_id=service_id, operation_name=operation_name - ), + f'before-call.{service_id}.{operation_name}', model=operation_model, params=request_dict, request_signer=self._request_signer, 
@@ -1003,9 +1007,7 @@ def _make_api_call(self, operation_name, api_params): ) self.meta.events.emit( - 'after-call.{service_id}.{operation_name}'.format( - service_id=service_id, operation_name=operation_name - ), + f'after-call.{service_id}.{operation_name}', http_response=http, parsed=parsed_response, model=operation_model, @@ -1027,10 +1029,7 @@ def _make_request(self, operation_model, request_dict, request_context): return self._endpoint.make_request(operation_model, request_dict) except Exception as e: self.meta.events.emit( - 'after-call-error.{service_id}.{operation_name}'.format( - service_id=self._service_model.service_id.hyphenize(), - operation_name=operation_model.name, - ), + f'after-call-error.{self._service_model.service_id.hyphenize()}.{operation_model.name}', exception=e, context=request_context, ) @@ -1259,13 +1258,13 @@ def get_waiter(self, waiter_name): """ config = self._get_waiter_config() if not config: - raise ValueError("Waiter does not exist: %s" % waiter_name) + raise ValueError(f"Waiter does not exist: {waiter_name}") model = waiter.WaiterModel(config) mapping = {} for name in model.waiter_names: mapping[xform_name(name)] = name if waiter_name not in mapping: - raise ValueError("Waiter does not exist: %s" % waiter_name) + raise ValueError(f"Waiter does not exist: {waiter_name}") return waiter.create_waiter_with_client( mapping[waiter_name], model, self diff --git a/botocore/config.py b/botocore/config.py index 87b52b6f1a..587dc95ad8 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -221,6 +221,12 @@ class Config: Defaults to None. + :type sigv4a_signing_region_set: string + :param sigv4a_signing_region_set: A set of AWS regions to apply the signature for + when using SigV4a for signing. Set to ``*`` to represent all regions. + + Defaults to None. + :type client_context_params: dict :param client_context_params: A dictionary of parameters specific to individual services. 
If available, valid parameters can be found in @@ -257,6 +263,7 @@ class Config: ('request_min_compression_size_bytes', None), ('disable_request_compression', None), ('client_context_params', None), + ('sigv4a_signing_region_set', None), ] ) diff --git a/botocore/configprovider.py b/botocore/configprovider.py index 3b68fca57f..b0dd09f09f 100644 --- a/botocore/configprovider.py +++ b/botocore/configprovider.py @@ -13,6 +13,7 @@ """This module contains the interface for controlling how configuration is loaded. """ + import copy import logging import os @@ -167,6 +168,12 @@ False, utils.ensure_boolean, ), + 'sigv4a_signing_region_set': ( + 'sigv4a_signing_region_set', + 'AWS_SIGV4A_SIGNING_REGION_SET', + None, + None, + ), } # A mapping for the s3 specific configuration vars. These are the configuration # vars that typically go in the s3 section of the config file. This mapping @@ -697,7 +704,7 @@ def _convert_type(self, value): return value def __repr__(self): - return '[%s]' % ', '.join([str(p) for p in self._providers]) + return '[{}]'.format(', '.join([str(p) for p in self._providers])) class InstanceVarProvider(BaseProvider): @@ -728,10 +735,7 @@ def provide(self): return value def __repr__(self): - return 'InstanceVarProvider(instance_var={}, session={})'.format( - self._instance_var, - self._session, - ) + return f'InstanceVarProvider(instance_var={self._instance_var}, session={self._session})' class ScopedConfigProvider(BaseProvider): @@ -767,10 +771,7 @@ def provide(self): return scoped_config.get(self._config_var_name) def __repr__(self): - return 'ScopedConfigProvider(config_var_name={}, session={})'.format( - self._config_var_name, - self._session, - ) + return f'ScopedConfigProvider(config_var_name={self._config_var_name}, session={self._session})' class EnvironmentProvider(BaseProvider): @@ -878,7 +879,7 @@ def provide(self): return self._value def __repr__(self): - return 'ConstantProvider(value=%s)' % self._value + return 
f'ConstantProvider(value={self._value})' class ConfiguredEndpointProvider(BaseProvider): diff --git a/botocore/credentials.py b/botocore/credentials.py index e48f0dec7c..dd7e718255 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -753,7 +753,7 @@ def __init__( super().__init__(cache, expiry_window_seconds) def _generate_assume_role_name(self): - self._role_session_name = 'botocore-session-%s' % (int(time.time())) + self._role_session_name = f'botocore-session-{int(time.time())}' self._assume_kwargs['RoleSessionName'] = self._role_session_name self._using_default_session_name = True @@ -848,7 +848,7 @@ def _assume_role_kwargs(self): mfa_serial = assume_role_kwargs.get('SerialNumber') if mfa_serial is not None: - prompt = 'Enter MFA code for %s: ' % mfa_serial + prompt = f'Enter MFA code for {mfa_serial}: ' token_code = self._mfa_prompter(prompt) assume_role_kwargs['TokenCode'] = token_code @@ -1570,8 +1570,8 @@ def _get_role_config(self, profile_name): if credential_source is not None and source_profile is not None: raise InvalidConfigError( error_msg=( - 'The profile "%s" contains both source_profile and ' - 'credential_source.' % profile_name + f'The profile "{profile_name}" contains both ' + 'source_profile and credential_source.' 
) ) elif credential_source is None and source_profile is None: @@ -1720,7 +1720,7 @@ def _resolve_credentials_from_source( provider=credential_source, error_msg=( 'No credentials found in credential_source referenced ' - 'in profile %s' % profile_name + f'in profile {profile_name}' ), ) return credentials @@ -2242,8 +2242,8 @@ def _load_sso_config(self): missing = ', '.join(missing_config_vars) raise InvalidConfigError( error_msg=( - 'The profile "%s" is configured to use SSO but is missing ' - 'required configuration: %s' % (profile_name, missing) + f'The profile "{profile_name}" is configured to use SSO ' + f'but is missing required configuration: {missing}' ) ) return config diff --git a/botocore/data/accessanalyzer/2019-11-01/paginators-1.json b/botocore/data/accessanalyzer/2019-11-01/paginators-1.json index a975fa2508..de88b6d49e 100644 --- a/botocore/data/accessanalyzer/2019-11-01/paginators-1.json +++ b/botocore/data/accessanalyzer/2019-11-01/paginators-1.json @@ -59,6 +59,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "findings" + }, + "GetFindingRecommendation": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "recommendedSteps" } } } diff --git a/botocore/data/accessanalyzer/2019-11-01/paginators-1.sdk-extras.json b/botocore/data/accessanalyzer/2019-11-01/paginators-1.sdk-extras.json index 0a06b3155a..2fe19c01f7 100644 --- a/botocore/data/accessanalyzer/2019-11-01/paginators-1.sdk-extras.json +++ b/botocore/data/accessanalyzer/2019-11-01/paginators-1.sdk-extras.json @@ -15,6 +15,16 @@ "id", "updatedAt" ] + }, + "GetFindingRecommendation": { + "non_aggregate_keys": [ + "status", + "error", + "completedAt", + "recommendationType", + "resourceArn", + "startedAt" + ] } } } diff --git a/botocore/data/accessanalyzer/2019-11-01/service-2.json b/botocore/data/accessanalyzer/2019-11-01/service-2.json index 2a66180e5e..a111c26aa6 100644 --- 
a/botocore/data/accessanalyzer/2019-11-01/service-2.json +++ b/botocore/data/accessanalyzer/2019-11-01/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2019-11-01", "endpointPrefix":"access-analyzer", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Access Analyzer", "serviceId":"AccessAnalyzer", "signatureVersion":"v4", @@ -86,6 +86,25 @@ ], "documentation":"

Checks whether new access is allowed for an updated policy when compared to the existing policy.

You can find examples for reference policies and learn how to set up and run a custom policy check for new access in the IAM Access Analyzer custom policy checks samples repository on GitHub. The reference policies in this repository are meant to be passed to the existingPolicyDocument request parameter.

" }, + "CheckNoPublicAccess":{ + "name":"CheckNoPublicAccess", + "http":{ + "method":"POST", + "requestUri":"/policy/check-no-public-access", + "responseCode":200 + }, + "input":{"shape":"CheckNoPublicAccessRequest"}, + "output":{"shape":"CheckNoPublicAccessResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnprocessableEntityException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Checks whether a resource policy can grant public access to the specified resource type.

" + }, "CreateAccessPreview":{ "name":"CreateAccessPreview", "http":{ @@ -183,6 +202,22 @@ "documentation":"

Deletes the specified archive rule.

", "idempotent":true }, + "GenerateFindingRecommendation":{ + "name":"GenerateFindingRecommendation", + "http":{ + "method":"POST", + "requestUri":"/recommendation/{id}", + "responseCode":200 + }, + "input":{"shape":"GenerateFindingRecommendationRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a recommendation for an unused permissions finding.

" + }, "GetAccessPreview":{ "name":"GetAccessPreview", "http":{ @@ -273,6 +308,24 @@ ], "documentation":"

Retrieves information about the specified finding. GetFinding and GetFindingV2 both use access-analyzer:GetFinding in the Action element of an IAM policy statement. You must have permission to perform the access-analyzer:GetFinding action.

" }, + "GetFindingRecommendation":{ + "name":"GetFindingRecommendation", + "http":{ + "method":"GET", + "requestUri":"/recommendation/{id}", + "responseCode":200 + }, + "input":{"shape":"GetFindingRecommendationRequest"}, + "output":{"shape":"GetFindingRecommendationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves information about a finding recommendation for the specified analyzer.

" + }, "GetFindingV2":{ "name":"GetFindingV2", "http":{ @@ -600,14 +653,17 @@ "shapes":{ "Access":{ "type":"structure", - "required":["actions"], "members":{ "actions":{ "shape":"AccessActionsList", "documentation":"

A list of actions for the access permissions. Any strings that can be used as an action in an IAM policy can be used in the list of actions to check.

" + }, + "resources":{ + "shape":"AccessResourcesList", + "documentation":"

A list of resources for the access permissions. Any strings that can be used as a resource in an IAM policy can be used in the list of resources to check.

" } }, - "documentation":"

Contains information about actions that define permissions to check against a policy.

" + "documentation":"

Contains information about actions and resources that define permissions to check against a policy.

" }, "AccessActionsList":{ "type":"list", @@ -626,6 +682,29 @@ "RESOURCE_POLICY" ] }, + "AccessCheckResourceType":{ + "type":"string", + "enum":[ + "AWS::DynamoDB::Table", + "AWS::DynamoDB::Stream", + "AWS::EFS::FileSystem", + "AWS::OpenSearchService::Domain", + "AWS::Kinesis::Stream", + "AWS::Kinesis::StreamConsumer", + "AWS::KMS::Key", + "AWS::Lambda::Function", + "AWS::S3::Bucket", + "AWS::S3::AccessPoint", + "AWS::S3Express::DirectoryBucket", + "AWS::S3::Glacier", + "AWS::S3Outposts::Bucket", + "AWS::S3Outposts::AccessPoint", + "AWS::SecretsManager::Secret", + "AWS::SNS::Topic", + "AWS::SQS::Queue", + "AWS::IAM::AssumeRolePolicyDocument" + ] + }, "AccessDeniedException":{ "type":"structure", "required":["message"], @@ -823,6 +902,12 @@ "type":"list", "member":{"shape":"AccessPreviewSummary"} }, + "AccessResourcesList":{ + "type":"list", + "member":{"shape":"Resource"}, + "max":100, + "min":0 + }, "AclCanonicalId":{"type":"string"}, "AclGrantee":{ "type":"structure", @@ -1112,7 +1197,7 @@ }, "access":{ "shape":"CheckAccessNotGrantedRequestAccessList", - "documentation":"

An access object containing the permissions that shouldn't be granted by the specified policy.

" + "documentation":"

An access object containing the permissions that shouldn't be granted by the specified policy. If only actions are specified, IAM Access Analyzer checks for access of the actions on all resources in the policy. If only resources are specified, then IAM Access Analyzer checks which actions have access to the specified resources. If both actions and resources are specified, then IAM Access Analyzer checks which of the specified actions have access to the specified resources.

" }, "policyType":{ "shape":"AccessCheckPolicyType", @@ -1196,6 +1281,47 @@ "FAIL" ] }, + "CheckNoPublicAccessRequest":{ + "type":"structure", + "required":[ + "policyDocument", + "resourceType" + ], + "members":{ + "policyDocument":{ + "shape":"AccessCheckPolicyDocument", + "documentation":"

The JSON policy document to evaluate for public access.

" + }, + "resourceType":{ + "shape":"AccessCheckResourceType", + "documentation":"

The type of resource to evaluate for public access. For example, to check for public access to Amazon S3 buckets, you can choose AWS::S3::Bucket for the resource type.

For resource types not supported as valid values, IAM Access Analyzer will return an error.

" + } + } + }, + "CheckNoPublicAccessResponse":{ + "type":"structure", + "members":{ + "result":{ + "shape":"CheckNoPublicAccessResult", + "documentation":"

The result of the check for public access to the specified resource type. If the result is PASS, the policy doesn't allow public access to the specified resource type. If the result is FAIL, the policy might allow public access to the specified resource type.

" + }, + "message":{ + "shape":"String", + "documentation":"

The message indicating whether the specified policy allows public access to resources.

" + }, + "reasons":{ + "shape":"ReasonSummaryList", + "documentation":"

A list of reasons why the specified resource policy grants public access for the resource type.

" + } + } + }, + "CheckNoPublicAccessResult":{ + "type":"string", + "enum":[ + "PASS", + "FAIL" + ] + }, "CloudTrailArn":{ "type":"string", "pattern":"arn:[^:]*:cloudtrail:[^:]*:[^:]*:trail/.{1,576}" @@ -1950,6 +2076,32 @@ "type":"list", "member":{"shape":"FindingSummaryV2"} }, + "GenerateFindingRecommendationRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "id" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the finding recommendation.

", + "location":"querystring", + "locationName":"analyzerArn" + }, + "id":{ + "shape":"GenerateFindingRecommendationRequestIdString", + "documentation":"

The unique ID for the finding recommendation.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GenerateFindingRecommendationRequestIdString":{ + "type":"string", + "max":2048, + "min":1 + }, "GeneratedPolicy":{ "type":"structure", "required":["policy"], @@ -2116,6 +2268,93 @@ }, "documentation":"

The response to the request.

" }, + "GetFindingRecommendationRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "id" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the finding recommendation.

", + "location":"querystring", + "locationName":"analyzerArn" + }, + "id":{ + "shape":"GetFindingRecommendationRequestIdString", + "documentation":"

The unique ID for the finding recommendation.

", + "location":"uri", + "locationName":"id" + }, + "maxResults":{ + "shape":"GetFindingRecommendationRequestMaxResultsInteger", + "documentation":"

The maximum number of results to return in the response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetFindingRecommendationRequestIdString":{ + "type":"string", + "max":2048, + "min":1 + }, + "GetFindingRecommendationRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "GetFindingRecommendationResponse":{ + "type":"structure", + "required":[ + "startedAt", + "resourceArn", + "recommendationType", + "status" + ], + "members":{ + "startedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the retrieval of the finding recommendation was started.

" + }, + "completedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the retrieval of the finding recommendation was completed.

" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

" + }, + "error":{ + "shape":"RecommendationError", + "documentation":"

Detailed information about the reason that the retrieval of a recommendation for the finding failed.

" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource of the finding.

" + }, + "recommendedSteps":{ + "shape":"RecommendedStepList", + "documentation":"

A group of recommended steps for the finding.

" + }, + "recommendationType":{ + "shape":"RecommendationType", + "documentation":"

The type of recommendation for the finding.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the retrieval of the finding recommendation.

" + } + } + }, "GetFindingRequest":{ "type":"structure", "required":[ @@ -3157,10 +3396,59 @@ "type":"list", "member":{"shape":"ReasonSummary"} }, + "RecommendationError":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"String", + "documentation":"

The error code for a failed retrieval of a recommendation for a finding.

" + }, + "message":{ + "shape":"String", + "documentation":"

The error message for a failed retrieval of a recommendation for a finding.

" + } + }, + "documentation":"

Contains information about the reason that the retrieval of a recommendation for a finding failed.

" + }, + "RecommendationType":{ + "type":"string", + "enum":["UnusedPermissionRecommendation"] + }, + "RecommendedRemediationAction":{ + "type":"string", + "enum":[ + "CREATE_POLICY", + "DETACH_POLICY" + ] + }, + "RecommendedStep":{ + "type":"structure", + "members":{ + "unusedPermissionsRecommendedStep":{ + "shape":"UnusedPermissionsRecommendedStep", + "documentation":"

A recommended step for an unused permissions finding.

" + } + }, + "documentation":"

Contains information about a recommended step for an unused access analyzer finding.

", + "union":true + }, + "RecommendedStepList":{ + "type":"list", + "member":{"shape":"RecommendedStep"} + }, "RegionList":{ "type":"list", "member":{"shape":"String"} }, + "Resource":{ + "type":"string", + "max":2048, + "min":0 + }, "ResourceArn":{ "type":"string", "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" @@ -3467,6 +3755,14 @@ }, "documentation":"

Starts a scan of the policies applied to the specified resource.

" }, + "Status":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "IN_PROGRESS" + ] + }, "StatusReason":{ "type":"structure", "required":["code"], @@ -3732,11 +4028,34 @@ }, "lastAccessed":{ "shape":"Timestamp", - "documentation":"

The time at which the permission last accessed.

" + "documentation":"

The time at which the permission was last accessed.

" } }, "documentation":"

Contains information about an unused access finding for a permission. IAM Access Analyzer charges for unused access analysis based on the number of IAM roles and users analyzed per month. For more details on pricing, see IAM Access Analyzer pricing.

" }, + "UnusedPermissionsRecommendedStep":{ + "type":"structure", + "required":["recommendedAction"], + "members":{ + "policyUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the existing policy for the unused permissions finding was last updated.

" + }, + "recommendedAction":{ + "shape":"RecommendedRemediationAction", + "documentation":"

A recommendation of whether to create or detach a policy for an unused permissions finding.

" + }, + "recommendedPolicy":{ + "shape":"String", + "documentation":"

If the recommended action for the unused permissions finding is to replace the existing policy, the contents of the recommended policy to replace the policy specified in the existingPolicyId field.

" + }, + "existingPolicyId":{ + "shape":"String", + "documentation":"

If the recommended action for the unused permissions finding is to detach a policy, the ID of an existing policy to be detached.

" + } + }, + "documentation":"

Contains information about the action to take for a policy in an unused permissions finding.

" + }, "UpdateArchiveRuleRequest":{ "type":"structure", "required":[ @@ -3960,7 +4279,8 @@ "unknownOperation", "cannotParse", "fieldValidationFailed", - "other" + "other", + "notSupported" ] }, "ValueList":{ diff --git a/botocore/data/account/2021-02-01/endpoint-rule-set-1.json b/botocore/data/account/2021-02-01/endpoint-rule-set-1.json index 88e9002a1f..8f8a08191d 100644 --- a/botocore/data/account/2021-02-01/endpoint-rule-set-1.json +++ b/botocore/data/account/2021-02-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -235,7 +233,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -270,7 +267,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -281,14 +277,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,14 +300,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -318,11 +314,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -354,7 +352,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -374,7 +371,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -385,14 +381,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is 
enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -403,9 +401,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/account/2021-02-01/service-2.json b/botocore/data/account/2021-02-01/service-2.json index 65ba47afbb..5620b7d1c0 100644 --- a/botocore/data/account/2021-02-01/service-2.json +++ b/botocore/data/account/2021-02-01/service-2.json @@ -12,6 +12,25 @@ "uid":"account-2021-02-01" }, "operations":{ + "AcceptPrimaryEmailUpdate":{ + "name":"AcceptPrimaryEmailUpdate", + "http":{ + "method":"POST", + "requestUri":"/acceptPrimaryEmailUpdate", + "responseCode":200 + }, + "input":{"shape":"AcceptPrimaryEmailUpdateRequest"}, + "output":{"shape":"AcceptPrimaryEmailUpdateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Accepts the request that originated from StartPrimaryEmailUpdate to update the primary email address (also known as the root user email address) for the specified account.

" + }, "DeleteAlternateContact":{ "name":"DeleteAlternateContact", "http":{ @@ -45,7 +64,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Disables (opts-out) a particular Region for an account.

" + "documentation":"

Disables (opts-out) a particular Region for an account.

The act of disabling a Region will remove all IAM access to any resources that reside in that Region.

" }, "EnableRegion":{ "name":"EnableRegion", @@ -100,6 +119,24 @@ ], "documentation":"

Retrieves the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary and alternate contact information.

" }, + "GetPrimaryEmail":{ + "name":"GetPrimaryEmail", + "http":{ + "method":"POST", + "requestUri":"/getPrimaryEmail", + "responseCode":200 + }, + "input":{"shape":"GetPrimaryEmailRequest"}, + "output":{"shape":"GetPrimaryEmailResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves the primary email address for the specified account.

" + }, "GetRegionOptStatus":{ "name":"GetRegionOptStatus", "http":{ @@ -167,9 +204,59 @@ ], "documentation":"

Updates the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary and alternate contact information.

", "idempotent":true + }, + "StartPrimaryEmailUpdate":{ + "name":"StartPrimaryEmailUpdate", + "http":{ + "method":"POST", + "requestUri":"/startPrimaryEmailUpdate", + "responseCode":200 + }, + "input":{"shape":"StartPrimaryEmailUpdateRequest"}, + "output":{"shape":"StartPrimaryEmailUpdateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts the process to update the primary email address for the specified account.

" } }, "shapes":{ + "AcceptPrimaryEmailUpdateRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Otp", + "PrimaryEmail" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

This operation can only be called from the management account or the delegated administrator account of an organization for a member account.

The management account can't specify its own AccountId.

" + }, + "Otp":{ + "shape":"Otp", + "documentation":"

The OTP code sent to the PrimaryEmail specified on the StartPrimaryEmailUpdate API call.

" + }, + "PrimaryEmail":{ + "shape":"PrimaryEmailAddress", + "documentation":"

The new primary email address for use with the specified account. This must match the PrimaryEmail from the StartPrimaryEmailUpdate API call.

" + } + } + }, + "AcceptPrimaryEmailUpdateResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"PrimaryEmailUpdateStatus", + "documentation":"

Retrieves the status of the accepted primary email update request.

" + } + } + }, "AccessDeniedException":{ "type":"structure", "required":["message"], @@ -305,7 +392,7 @@ }, "StateOrRegion":{ "shape":"StateOrRegion", - "documentation":"

The state or region of the primary contact address. This field is required in selected countries.

" + "documentation":"

The state or region of the primary contact address. If the mailing address is within the United States (US), the value in this field can be either a two character state code (for example, NJ) or the full state name (for example, New Jersey). This field is required in the following countries: US, CA, GB, DE, JP, IN, and BR.

" }, "WebsiteUrl":{ "shape":"WebsiteUrl", @@ -347,7 +434,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "RegionName":{ "shape":"RegionName", @@ -374,7 +461,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "RegionName":{ "shape":"RegionName", @@ -416,7 +503,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" } } }, @@ -429,13 +516,32 @@ } } }, + "GetPrimaryEmailRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

This operation can only be called from the management account or the delegated administrator account of an organization for a member account.

The management account can't specify its own AccountId.

" + } + } + }, + "GetPrimaryEmailResponse":{ + "type":"structure", + "members":{ + "PrimaryEmail":{ + "shape":"PrimaryEmailAddress", + "documentation":"

Retrieves the primary email address associated with the specified account.

" + } + } + }, "GetRegionOptStatusRequest":{ "type":"structure", "required":["RegionName"], "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "RegionName":{ "shape":"RegionName", @@ -473,7 +579,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "MaxResults":{ "shape":"ListRegionsRequestMaxResultsInteger", @@ -519,6 +625,11 @@ "min":1, "sensitive":true }, + "Otp":{ + "type":"string", + "pattern":"^[a-zA-Z0-9]{6}$", + "sensitive":true + }, "PhoneNumber":{ "type":"string", "max":25, @@ -532,6 +643,19 @@ "min":1, "sensitive":true }, + "PrimaryEmailAddress":{ + "type":"string", + "max":64, + "min":5, + "sensitive":true + }, + "PrimaryEmailUpdateStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACCEPTED" + ] + }, "PutAlternateContactRequest":{ "type":"structure", "required":[ @@ -574,7 +698,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "ContactInformation":{ "shape":"ContactInformation", @@ -636,6 +760,32 @@ "type":"string", "sensitive":true }, + "StartPrimaryEmailUpdateRequest":{ + "type":"structure", + "required":[ + "AccountId", + "PrimaryEmail" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

This operation can only be called from the management account or the delegated administrator account of an organization for a member account.

The management account can't specify its own AccountId.

" + }, + "PrimaryEmail":{ + "shape":"PrimaryEmailAddress", + "documentation":"

The new primary email address (also known as the root user email address) to use in the specified account.

" + } + } + }, + "StartPrimaryEmailUpdateResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"PrimaryEmailUpdateStatus", + "documentation":"

The status of the primary email update request.

" + } + } + }, "StateOrRegion":{ "type":"string", "max":50, diff --git a/botocore/data/acm-pca/2017-08-22/service-2.json b/botocore/data/acm-pca/2017-08-22/service-2.json index 8a7365ac14..548bea2067 100644 --- a/botocore/data/acm-pca/2017-08-22/service-2.json +++ b/botocore/data/acm-pca/2017-08-22/service-2.json @@ -2,13 +2,16 @@ "version":"2.0", "metadata":{ "apiVersion":"2017-08-22", + "auth":["aws.auth#sigv4"], "endpointPrefix":"acm-pca", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"ACM-PCA", "serviceFullName":"AWS Certificate Manager Private Certificate Authority", "serviceId":"ACM PCA", "signatureVersion":"v4", + "signingName":"acm-pca", "targetPrefix":"ACMPrivateCA", "uid":"acm-pca-2017-08-22" }, @@ -22,12 +25,12 @@ "input":{"shape":"CreateCertificateAuthorityRequest"}, "output":{"shape":"CreateCertificateAuthorityResponse"}, "errors":[ + {"shape":"LimitExceededException"}, {"shape":"InvalidArgsException"}, - {"shape":"InvalidPolicyException"}, {"shape":"InvalidTagException"}, - {"shape":"LimitExceededException"} + {"shape":"InvalidPolicyException"} ], - "documentation":"

Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, an optional configuration for Online Certificate Status Protocol (OCSP) and/or a certificate revocation list (CRL), the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The OCSP configuration can optionally specify a custom URL for the OCSP responder. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA.

Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

", + "documentation":"

Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, an optional configuration for Online Certificate Status Protocol (OCSP) and/or a certificate revocation list (CRL), the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The OCSP configuration can optionally specify a custom URL for the OCSP responder. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA.

Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

", "idempotent":true }, "CreateCertificateAuthorityAuditReport":{ @@ -39,12 +42,12 @@ "input":{"shape":"CreateCertificateAuthorityAuditReportRequest"}, "output":{"shape":"CreateCertificateAuthorityAuditReportResponse"}, "errors":[ - {"shape":"RequestInProgressException"}, - {"shape":"RequestFailedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidArgsException"}, - {"shape":"InvalidStateException"} + {"shape":"RequestFailedException"}, + {"shape":"InvalidStateException"}, + {"shape":"RequestInProgressException"} ], "documentation":"

Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key.

Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit Reports.

You can generate a maximum of one report every 30 minutes.

", "idempotent":true @@ -57,12 +60,12 @@ }, "input":{"shape":"CreatePermissionRequest"}, "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"PermissionAlreadyExistsException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"PermissionAlreadyExistsException"}, - {"shape":"LimitExceededException"}, - {"shape":"InvalidStateException"}, - {"shape":"RequestFailedException"} + {"shape":"RequestFailedException"}, + {"shape":"InvalidStateException"} ], "documentation":"

Grants one or more permissions on a private CA to the Certificate Manager (ACM) service principal (acm.amazonaws.com). These permissions allow ACM to issue and renew ACM certificates that reside in the same Amazon Web Services account as the CA.

You can list current permissions with the ListPermissions action and revoke them with the DeletePermission action.

About Permissions

  • If the private CA and the certificates it issues reside in the same account, you can use CreatePermission to grant permissions for ACM to carry out automatic certificate renewals.

  • For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates.

  • If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

" }, @@ -74,10 +77,10 @@ }, "input":{"shape":"DeleteCertificateAuthorityRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"} + {"shape":"InvalidStateException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Deletes a private certificate authority (CA). You must provide the Amazon Resource Name (ARN) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities action.

Deleting a CA will invalidate other CAs and certificates below it in your CA hierarchy.

Before you can delete a CA that you have created and activated, you must disable it. To do this, call the UpdateCertificateAuthority action and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (that is, the status of the CA is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate into Amazon Web Services Private CA (that is, the status of the CA is PENDING_CERTIFICATE).

When you successfully call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority action returns the time remaining in the restoration window of a private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority action.

" }, @@ -91,8 +94,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"}, - {"shape":"RequestFailedException"} + {"shape":"RequestFailedException"}, + {"shape":"InvalidStateException"} ], "documentation":"

Revokes permissions on a private CA granted to the Certificate Manager (ACM) service principal (acm.amazonaws.com).

These permissions allow ACM to issue and renew ACM certificates that reside in the same Amazon Web Services account as the CA. If you revoke these permissions, ACM will no longer renew the affected certificates automatically.

Permissions can be granted with the CreatePermission action and listed with the ListPermissions action.

About Permissions

  • If the private CA and the certificates it issues reside in the same account, you can use CreatePermission to grant permissions for ACM to carry out automatic certificate renewals.

  • For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates.

  • If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

" }, @@ -104,12 +107,12 @@ }, "input":{"shape":"DeletePolicyRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"}, {"shape":"LockoutPreventedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"}, {"shape":"RequestFailedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InvalidStateException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Deletes the resource-based policy attached to a private CA. Deletion will remove any access that the policy has granted. If there is no policy attached to the private CA, this action will return successful.

If you delete a policy that was applied through Amazon Web Services Resource Access Manager (RAM), the CA will be removed from all shares in which it was included.

The Certificate Manager Service Linked Role that the policy supports is not affected when you delete the policy.

The current policy can be shown with GetPolicy and updated with PutPolicy.

About Policies

  • A policy grants access on a private CA to an Amazon Web Services customer account, to Amazon Web Services Organizations, or to an Amazon Web Services Organizations unit. Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

  • A policy permits a user of Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account.

  • For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the Amazon Web Services Private CA policy. For more information, see Using a Service Linked Role with ACM.

  • Updates made in Amazon Web Services Resource Manager (RAM) are reflected in policies. For more information, see Attach a Policy for Cross-Account Access.

" }, @@ -151,11 +154,11 @@ "input":{"shape":"GetCertificateRequest"}, "output":{"shape":"GetCertificateResponse"}, "errors":[ - {"shape":"RequestInProgressException"}, - {"shape":"RequestFailedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"} + {"shape":"RequestFailedException"}, + {"shape":"InvalidStateException"}, + {"shape":"RequestInProgressException"} ], "documentation":"

Retrieves a certificate from your private CA or one that has been shared with you. The ARN of the certificate is returned when you call the IssueCertificate action. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate action. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport action to create a report that contains information about all of the certificates issued and revoked by your private CA.

" }, @@ -169,8 +172,8 @@ "output":{"shape":"GetCertificateAuthorityCertificateResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidStateException"}, - {"shape":"InvalidArnException"} + {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"} ], "documentation":"

Retrieves the certificate and certificate chain for your private certificate authority (CA) or one that has been shared with you. Both the certificate and the chain are base64 PEM-encoded. The chain does not include the CA certificate. Each certificate in the chain signs the one before it.

" }, @@ -183,11 +186,11 @@ "input":{"shape":"GetCertificateAuthorityCsrRequest"}, "output":{"shape":"GetCertificateAuthorityCsrResponse"}, "errors":[ - {"shape":"RequestInProgressException"}, - {"shape":"RequestFailedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"} + {"shape":"RequestFailedException"}, + {"shape":"InvalidStateException"}, + {"shape":"RequestInProgressException"} ], "documentation":"

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your Amazon Web Services Private CA-hosted or on-premises root or subordinate CA. Then import the signed certificate back into Amazon Web Services Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a base64 PEM-encoded string.

" }, @@ -200,10 +203,10 @@ "input":{"shape":"GetPolicyRequest"}, "output":{"shape":"GetPolicyResponse"}, "errors":[ + {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"}, {"shape":"RequestFailedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InvalidStateException"} ], "documentation":"

Retrieves the resource-based policy attached to a private CA. If either the private CA resource or the policy cannot be found, this action returns a ResourceNotFoundException.

The policy can be attached or updated with PutPolicy and removed with DeletePolicy.

About Policies

  • A policy grants access on a private CA to an Amazon Web Services customer account, to Amazon Web Services Organizations, or to an Amazon Web Services Organizations unit. Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

  • A policy permits a user of Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account.

  • For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the Amazon Web Services Private CA policy. For more information, see Using a Service Linked Role with ACM.

  • Updates made in Amazon Web Services Resource Manager (RAM) are reflected in policies. For more information, see Attach a Policy for Cross-Account Access.

" }, @@ -215,15 +218,15 @@ }, "input":{"shape":"ImportCertificateAuthorityCertificateRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"RequestInProgressException"}, - {"shape":"RequestFailedException"}, + {"shape":"CertificateMismatchException"}, + {"shape":"MalformedCertificateException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidRequestException"}, + {"shape":"RequestFailedException"}, {"shape":"InvalidStateException"}, - {"shape":"MalformedCertificateException"}, - {"shape":"CertificateMismatchException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"RequestInProgressException"} ], "documentation":"

Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must be in place:

  1. In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate.

  2. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR).

  3. Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA.

  4. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Amazon Web Services Private CA supports three scenarios for installing a CA certificate:

  • Installing a certificate for a root CA hosted by Amazon Web Services Private CA.

  • Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA.

  • Installing a subordinate CA certificate whose parent authority is externally hosted.

The following additional requirements apply when you import a CA certificate.

  • Only a self-signed certificate can be imported as a root CA.

  • A self-signed certificate cannot be imported as a subordinate CA.

  • Your certificate chain must not include the private CA certificate that you are importing.

  • Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

  • The chain must be PEM-encoded.

  • The maximum allowed size of a certificate is 32 KB.

  • The maximum allowed size of a certificate chain is 2 MB.

Enforcement of Critical Constraints

Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain.

  • Basic constraints (must be marked critical)

  • Subject alternative names

  • Key usage

  • Extended key usage

  • Authority key identifier

  • Subject key identifier

  • Issuer alternative name

  • Subject directory attributes

  • Subject information access

  • Certificate policies

  • Policy mappings

  • Inhibit anyPolicy

Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain.

  • Name constraints

  • Policy constraints

  • CRL distribution points

  • Authority information access

  • Freshest CRL

  • Any other extension

" }, @@ -238,9 +241,9 @@ "errors":[ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidStateException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidArgsException"}, + {"shape":"InvalidStateException"}, {"shape":"MalformedCSRException"} ], "documentation":"

Uses your private certificate authority (CA), or one that has been shared with you, to issue a client certificate. This action returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate action and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities action to retrieve the ARNs of the certificates that you issue by using Amazon Web Services Private CA.

", @@ -270,9 +273,9 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, - {"shape":"InvalidNextTokenException"}, + {"shape":"RequestFailedException"}, {"shape":"InvalidStateException"}, - {"shape":"RequestFailedException"} + {"shape":"InvalidNextTokenException"} ], "documentation":"

List all permissions on a private CA, if any, granted to the Certificate Manager (ACM) service principal (acm.amazonaws.com).

These permissions allow ACM to issue and renew ACM certificates that reside in the same Amazon Web Services account as the CA.

Permissions can be granted with the CreatePermission action and revoked with the DeletePermission action.

About Permissions

  • If the private CA and the certificates it issues reside in the same account, you can use CreatePermission to grant permissions for ACM to carry out automatic certificate renewals.

  • For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates.

  • If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

" }, @@ -299,13 +302,13 @@ }, "input":{"shape":"PutPolicyRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"InvalidArnException"}, - {"shape":"InvalidStateException"}, - {"shape":"InvalidPolicyException"}, {"shape":"LockoutPreventedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"}, {"shape":"RequestFailedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InvalidStateException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"InvalidPolicyException"} ], "documentation":"

Attaches a resource-based policy to a private CA.

A policy can also be applied by sharing a private CA through Amazon Web Services Resource Access Manager (RAM). For more information, see Attach a Policy for Cross-Account Access.

The policy can be displayed with GetPolicy and removed with DeletePolicy.

About Policies

  • A policy grants access on a private CA to an Amazon Web Services customer account, to Amazon Web Services Organizations, or to an Amazon Web Services Organizations unit. Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with Amazon Web Services Private CA.

  • A policy permits a user of Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account.

  • For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the Amazon Web Services Private CA policy. For more information, see Using a Service Linked Role with ACM.

  • Updates made in Amazon Web Services Resource Manager (RAM) are reflected in policies. For more information, see Attach a Policy for Cross-Account Access.

" }, @@ -318,8 +321,8 @@ "input":{"shape":"RestoreCertificateAuthorityRequest"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidStateException"}, - {"shape":"InvalidArnException"} + {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"} ], "documentation":"

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority action. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities actions. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority action returns. To change its status to ACTIVE, call the UpdateCertificateAuthority action. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate action to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.

" }, @@ -331,15 +334,15 @@ }, "input":{"shape":"RevokeCertificateRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, + {"shape":"RequestAlreadyProcessedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidRequestException"}, + {"shape":"RequestFailedException"}, {"shape":"InvalidStateException"}, - {"shape":"LimitExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"RequestAlreadyProcessedException"}, - {"shape":"RequestInProgressException"}, - {"shape":"RequestFailedException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"RequestInProgressException"} ], "documentation":"

Revokes a certificate that was issued inside Amazon Web Services Private CA. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. Amazon Web Services Private CA writes the CRL to an S3 bucket that you specify. A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason the CRL update fails, Amazon Web Services Private CA makes further attempts every 15 minutes. With Amazon CloudWatch, you can create alarms for the metrics CRLGenerated and MisconfiguredCRLBucket. For more information, see Supported CloudWatch Metrics.

Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3.

Amazon Web Services Private CA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

You cannot revoke a root CA self-signed certificate.

" }, @@ -382,11 +385,11 @@ }, "input":{"shape":"UpdateCertificateAuthorityRequest"}, "errors":[ - {"shape":"ConcurrentModificationException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidArgsException"}, {"shape":"InvalidArnException"}, + {"shape":"InvalidArgsException"}, {"shape":"InvalidStateException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"InvalidPolicyException"} ], "documentation":"

Updates the status or configuration of a private certificate authority (CA). Your private CA must be in the ACTIVE or DISABLED state before you can update it. You can disable a private CA that is in the ACTIVE state or make a CA that is in the DISABLED state active again.

Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3.

" @@ -487,8 +490,7 @@ "documentation":"

The location of AccessDescription information.

" } }, - "documentation":"

Provides access information used by the authorityInfoAccess and subjectInfoAccess extensions described in RFC 5280.

", - "box":true + "documentation":"

Provides access information used by the authorityInfoAccess and subjectInfoAccess extensions described in RFC 5280.

" }, "AccessDescriptionList":{ "type":"list", @@ -578,7 +580,7 @@ "type":"string", "max":4096, "min":1, - "pattern":"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + "pattern":"(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?" }, "Boolean":{"type":"boolean"}, "CertificateAuthorities":{ @@ -734,7 +736,7 @@ "type":"string", "max":253, "min":0, - "pattern":"^[-a-zA-Z0-9;/?:@&=+$,%_.!~*()']*$" + "pattern":"[-a-zA-Z0-9;/?:@&=+$,%_.!~*()']*" }, "ConcurrentModificationException":{ "type":"structure", @@ -868,8 +870,7 @@ }, "ExpirationInDays":{ "shape":"Integer1To5000", - "documentation":"

Validity period of the CRL in days.

", - "box":true + "documentation":"

Validity period of the CRL in days.

" }, "CustomCname":{ "shape":"CnameString", @@ -888,7 +889,7 @@ "documentation":"

Configures the behavior of the CRL Distribution Point extension for certificates issued by your certificate authority. If this field is not provided, then the CRL Distribution Point Extension will be present and contain the default CRL URL.

" } }, - "documentation":"

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA by default copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. If you want to configure this default behavior to be something different, you can set the CrlDistributionPointExtensionConfiguration parameter. Your S3 bucket policy must give write permission to Amazon Web Services Private CA.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a certificate's expiration date or when a certificate is revoked. When a certificate is revoked, it appears in the CRL until the certificate expires, and then in one additional CRL after expiration, and it always appears in the audit report.

A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts every 15 minutes.

CRLs contain the following fields:

  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.

  • Signature Algorithm: The name of the algorithm used to sign the CRL.

  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.

  • Last Update: The issue date and time of this CRL.

  • Next Update: The day and time by which the next CRL will be issued.

  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.

    • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.

    • Revocation Date: Date and time the certificate was revoked.

    • CRL Entry Extensions: Optional extensions for the CRL entry.

      • X509v3 CRL Reason Code: Reason the certificate was revoked.

  • CRL Extensions: Optional extensions for the CRL.

    • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.

    • X509v3 CRL Number: Decimal sequence number for the CRL.

  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.

  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

For more information, see Planning a certificate revocation list (CRL) in the Amazon Web Services Private Certificate Authority User Guide

" + "documentation":"

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA by default copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. If you want to configure this default behavior to be something different, you can set the CrlDistributionPointExtensionConfiguration parameter. Your S3 bucket policy must give write permission to Amazon Web Services Private CA.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a certificate's expiration date or when a certificate is revoked. When a certificate is revoked, it appears in the CRL until the certificate expires, and then in one additional CRL after expiration, and it always appears in the audit report.

A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts every 15 minutes.

CRLs contain the following fields:

  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.

  • Signature Algorithm: The name of the algorithm used to sign the CRL.

  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.

  • Last Update: The issue date and time of this CRL.

  • Next Update: The day and time by which the next CRL will be issued.

  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.

    • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.

    • Revocation Date: Date and time the certificate was revoked.

    • CRL Entry Extensions: Optional extensions for the CRL entry.

      • X509v3 CRL Reason Code: Reason the certificate was revoked.

  • CRL Extensions: Optional extensions for the CRL.

    • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.

    • X509v3 CRL Number: Decimal sequence number for the CRL.

  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.

  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

For more information, see Planning a certificate revocation list (CRL) in the Amazon Web Services Private Certificate Authority User Guide

" }, "CrlDistributionPointExtensionConfiguration":{ "type":"structure", @@ -979,7 +980,7 @@ "type":"string", "max":64, "min":0, - "pattern":"^([0-2])\\.([0-9]|([0-3][0-9]))((\\.([0-9]+)){0,126})$" + "pattern":"([0-2])\\.([0-9]|([0-3][0-9]))((\\.([0-9]+)){0,126})" }, "DeleteCertificateAuthorityRequest":{ "type":"structure", @@ -1282,7 +1283,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Number (ARN) of the private CA that will have its policy retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities action.

" + "documentation":"

The Amazon Resource Number (ARN) of the private CA that will have its policy retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities action.

"

Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

" - }, "MaxResults":{ "shape":"MaxResults", "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

Although the maximum value is 1000, the action only returns a maximum of 100 items.

" }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

" + }, "ResourceOwner":{ "shape":"ResourceOwner", "documentation":"

Use this parameter to filter the returned set of certificate authorities based on their owner. The default is SELF.

" @@ -1521,13 +1525,13 @@ "ListCertificateAuthoritiesResponse":{ "type":"structure", "members":{ - "CertificateAuthorities":{ - "shape":"CertificateAuthorities", - "documentation":"

Summary information about each certificate authority you have created.

" - }, "NextToken":{ "shape":"NextToken", "documentation":"

When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.

" + }, + "CertificateAuthorities":{ + "shape":"CertificateAuthorities", + "documentation":"

Summary information about each certificate authority you have created.

" } } }, @@ -1535,30 +1539,30 @@ "type":"structure", "required":["CertificateAuthorityArn"], "members":{ - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities action.

" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

When paginating results, use this parameter to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" }, "NextToken":{ "shape":"NextToken", "documentation":"

When paginating results, use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the response you just received.

" }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

When paginating results, use this parameter to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" + "CertificateAuthorityArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities action.

" } } }, "ListPermissionsResponse":{ "type":"structure", "members":{ - "Permissions":{ - "shape":"PermissionList", - "documentation":"

Summary information about each permission assigned by the specified private CA, including the action enabled, the policy provided, and the time of creation.

" - }, "NextToken":{ "shape":"NextToken", "documentation":"

When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.

" + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

Summary information about each permission assigned by the specified private CA, including the action enabled, the policy provided, and the time of creation.

" } } }, @@ -1566,30 +1570,30 @@ "type":"structure", "required":["CertificateAuthorityArn"], "members":{ - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" }, "NextToken":{ "shape":"NextToken", "documentation":"

Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the response you just received.

" }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" + "CertificateAuthorityArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" } } }, "ListTagsResponse":{ "type":"structure", "members":{ - "Tags":{ - "shape":"TagList", - "documentation":"

The tags associated with your private CA.

" - }, "NextToken":{ "shape":"NextToken", "documentation":"

When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags associated with your private CA.

" } } }, @@ -1619,6 +1623,7 @@ }, "MaxResults":{ "type":"integer", + "box":true, "max":1000, "min":1 }, @@ -1663,6 +1668,7 @@ }, "PermanentDeletionTimeInDays":{ "type":"integer", + "box":true, "max":30, "min":7 }, @@ -1754,13 +1760,14 @@ }, "PositiveLong":{ "type":"long", + "box":true, "min":1 }, "Principal":{ "type":"string", "max":128, "min":0, - "pattern":"^[^*]+$" + "pattern":"[^*]+" }, "PutPolicyRequest":{ "type":"structure", @@ -1897,11 +1904,12 @@ "type":"string", "max":255, "min":3, - "pattern":"^[-a-zA-Z0-9._/]+$" + "pattern":"[-a-zA-Z0-9._/]+" }, "S3Key":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "S3ObjectAcl":{ "type":"string", @@ -1918,7 +1926,8 @@ "SHA512WITHECDSA", "SHA256WITHRSA", "SHA384WITHRSA", - "SHA512WITHRSA" + "SHA512WITHRSA", + "SM3WITHSM2" ] }, "String":{"type":"string"}, @@ -2009,7 +2018,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TagList":{ "type":"list", @@ -2021,7 +2030,7 @@ "type":"string", "max":256, "min":0, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TooManyTagsException":{ "type":"structure", @@ -2075,8 +2084,7 @@ "members":{ "Value":{ "shape":"PositiveLong", - "documentation":"

A long integer interpreted according to the value of Type, below.

", - "box":true + "documentation":"

A long integer interpreted according to the value of Type, below.

" }, "Type":{ "shape":"ValidityPeriodType", diff --git a/botocore/data/acm-pca/2017-08-22/waiters-2.json b/botocore/data/acm-pca/2017-08-22/waiters-2.json index 245186cadf..6da2171132 100644 --- a/botocore/data/acm-pca/2017-08-22/waiters-2.json +++ b/botocore/data/acm-pca/2017-08-22/waiters-2.json @@ -16,6 +16,11 @@ "state": "retry", "matcher": "error", "expected": "RequestInProgressException" + }, + { + "state": "failure", + "matcher": "error", + "expected": "AccessDeniedException" } ] }, @@ -34,6 +39,11 @@ "state": "retry", "matcher": "error", "expected": "RequestInProgressException" + }, + { + "state": "failure", + "matcher": "error", + "expected": "AccessDeniedException" } ] }, @@ -54,6 +64,11 @@ "matcher": "path", "argument": "AuditReportStatus", "expected": "FAILED" + }, + { + "state": "failure", + "matcher": "error", + "expected": "AccessDeniedException" } ] } diff --git a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json index 671234b4f5..e0738bc0b1 100644 --- a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json +++ b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", 
"argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index c3fac7b087..539c7d63a2 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"acm", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"ACM", "serviceFullName":"AWS Certificate Manager", "serviceId":"ACM", "signatureVersion":"v4", "targetPrefix":"CertificateManager", - "uid":"acm-2015-12-08" + "uid":"acm-2015-12-08", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToCertificate":{ @@ -103,7 +105,7 @@ {"shape":"RequestInProgressException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the certificates and inspect individual fields.

" + "documentation":"

Retrieves a certificate and its certificate chain. The certificate may be either a public or private certificate issued using the ACM RequestCertificate action, or a certificate imported into ACM using the ImportCertificate action. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the certificates and inspect individual fields.

" }, "ImportCertificate":{ "name":"ImportCertificate", @@ -122,7 +124,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Imports a certificate into Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

  • You must enter the private key that matches the certificate you are importing.

  • The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.

  • The private key must be no larger than 5 KB (5,120 bytes).

  • If the certificate you are importing is not self-signed, you must enter its certificate chain.

  • If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.

  • The certificate, private key, and certificate chain must be PEM-encoded.

  • The current time must be between the Not Before and Not After certificate fields.

  • The Issuer field must not be empty.

  • The OCSP authority URL, if present, must not exceed 1000 characters.

  • To import a new certificate, omit the CertificateArn argument. Include this argument only when you want to replace a previously imported certificate.

  • When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.

  • When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.

  • The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" + "documentation":"

Imports a certificate into Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

  • You must enter the private key that matches the certificate you are importing.

  • The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.

  • The private key must be no larger than 5 KB (5,120 bytes).

  • The certificate, private key, and certificate chain must be PEM-encoded.

  • The current time must be between the Not Before and Not After certificate fields.

  • The Issuer field must not be empty.

  • The OCSP authority URL, if present, must not exceed 1000 characters.

  • To import a new certificate, omit the CertificateArn argument. Include this argument only when you want to replace a previously imported certificate.

  • When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.

  • When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.

  • The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" }, "ListCertificates":{ "name":"ListCertificates", @@ -136,7 +138,7 @@ {"shape":"InvalidArgsException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of certificate ARNs and domain names. You can request that only certificates that match a specific status be listed. You can also filter by specific attributes of the certificate. Default filtering returns only RSA_2048 certificates. For more information, see Filters.

" + "documentation":"

Retrieves a list of certificate ARNs and domain names. By default, the API returns RSA_2048 certificates. To return all certificates in the account, include the keyType filter with the values [RSA_1024, RSA_2048, RSA_3072, RSA_4096, EC_prime256v1, EC_secp384r1, EC_secp521r1].

In addition to keyType, you can also filter by the CertificateStatuses, keyUsage, and extendedKeyUsage attributes on the certificate. For more information, see Filters.

" }, "ListTagsForCertificate":{ "name":"ListTagsForCertificate", @@ -452,11 +454,11 @@ }, "SubjectAlternativeNameSummaries":{ "shape":"DomainList", - "documentation":"

One or more domain names (subject alternative names) included in the certificate. This list contains the domain names that are bound to the public key that is contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website.

When called by ListCertificates, this parameter will only return the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

" + "documentation":"

One or more domain names (subject alternative names) included in the certificate. This list contains the domain names that are bound to the public key that is contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website.

When called by ListCertificates, this parameter will only return the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

" }, "HasAdditionalSubjectAlternativeNames":{ "shape":"NullableBoolean", - "documentation":"

When called by ListCertificates, indicates whether the full list of subject alternative names has been included in the response. If false, the response includes all of the subject alternative names included in the certificate. If true, the response only includes the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

", + "documentation":"

When called by ListCertificates, indicates whether the full list of subject alternative names has been included in the response. If false, the response includes all of the subject alternative names included in the certificate. If true, the response only includes the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

", "box":true }, "Status":{ @@ -614,7 +616,7 @@ }, "ValidationStatus":{ "shape":"DomainStatus", - "documentation":"

The validation status of the domain name. This can be one of the following values:

  • PENDING_VALIDATION

  • SUCCESS

  • FAILED

" + "documentation":"

The validation status of the domain name. This can be one of the following values:

  • PENDING_VALIDATION

  • SUCCESS

  • FAILED

" }, "ResourceRecord":{ "shape":"ResourceRecord", @@ -1202,7 +1204,7 @@ }, "KeyAlgorithm":{ "shape":"KeyAlgorithm", - "documentation":"

Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate.

Default: RSA_2048

" + "documentation":"

Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some Amazon Web Services services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the Amazon Web Services service where you plan to deploy your certificate. For more information about selecting an algorithm, see Key algorithms.

Algorithms supported for an ACM certificate request include:

  • RSA_2048

  • EC_prime256v1

  • EC_secp384r1

Other listed algorithms are for imported certificates only.

When you request a private PKI certificate signed by a CA from Amazon Web Services Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key.

Default: RSA_2048

" } } }, diff --git a/botocore/data/alexaforbusiness/2017-11-09/endpoint-rule-set-1.json b/botocore/data/alexaforbusiness/2017-11-09/endpoint-rule-set-1.json deleted file mode 100644 index d8065b9dc0..0000000000 --- a/botocore/data/alexaforbusiness/2017-11-09/endpoint-rule-set-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - 
}, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://a4b-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://a4b-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] 
- } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://a4b.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://a4b.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of file diff --git a/botocore/data/alexaforbusiness/2017-11-09/examples-1.json b/botocore/data/alexaforbusiness/2017-11-09/examples-1.json deleted file mode 100644 index 0ea7e3b0bb..0000000000 --- a/botocore/data/alexaforbusiness/2017-11-09/examples-1.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": "1.0", - "examples": { - } -} diff --git a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json b/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json deleted file mode 100644 index ced5de2f7e..0000000000 --- a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "pagination": { - "ListSkills": { - "result_key": "SkillSummaries", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "SearchUsers": { - "result_key": "Users", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "ListTags": { - "result_key": "Tags", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "SearchProfiles": { - "result_key": "Profiles", - 
"output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "SearchSkillGroups": { - "result_key": "SkillGroups", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "SearchDevices": { - "result_key": "Devices", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "SearchRooms": { - "result_key": "Rooms", - "output_token": "NextToken", - "input_token": "NextToken", - "limit_key": "MaxResults" - }, - "ListBusinessReportSchedules": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "BusinessReportSchedules" - }, - "ListConferenceProviders": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "ConferenceProviders" - }, - "ListDeviceEvents": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "DeviceEvents" - }, - "ListSkillsStoreCategories": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "CategoryList" - }, - "ListSkillsStoreSkillsByCategory": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "SkillsStoreSkills" - }, - "ListSmartHomeAppliances": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "SmartHomeAppliances" - } - } -} diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json deleted file mode 100644 index 9aa7a8702d..0000000000 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ /dev/null @@ -1,6318 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2017-11-09", - "endpointPrefix":"a4b", - "jsonVersion":"1.1", - "protocol":"json", - "serviceFullName":"Alexa For Business", - "serviceId":"Alexa For Business", 
- "signatureVersion":"v4", - "targetPrefix":"AlexaForBusiness", - "uid":"alexaforbusiness-2017-11-09" - }, - "operations":{ - "ApproveSkill":{ - "name":"ApproveSkill", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ApproveSkillRequest"}, - "output":{"shape":"ApproveSkillResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Associates a skill with the organization under the customer's AWS account. If a skill is private, the user implicitly accepts access to this skill during enablement.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateContactWithAddressBook":{ - "name":"AssociateContactWithAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateContactWithAddressBookRequest"}, - "output":{"shape":"AssociateContactWithAddressBookResponse"}, - "errors":[ - {"shape":"LimitExceededException"} - ], - "documentation":"

Associates a contact with a given address book.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateDeviceWithNetworkProfile":{ - "name":"AssociateDeviceWithNetworkProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateDeviceWithNetworkProfileRequest"}, - "output":{"shape":"AssociateDeviceWithNetworkProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"DeviceNotRegisteredException"} - ], - "documentation":"

Associates a device with the specified network profile.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateDeviceWithRoom":{ - "name":"AssociateDeviceWithRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateDeviceWithRoomRequest"}, - "output":{"shape":"AssociateDeviceWithRoomResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"DeviceNotRegisteredException"} - ], - "documentation":"

Associates a device with a given room. This applies all the settings from the room profile to the device, and all the skills in any skill groups added to that room. This operation requires the device to be online, or else a manual sync is required.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateSkillGroupWithRoom":{ - "name":"AssociateSkillGroupWithRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateSkillGroupWithRoomRequest"}, - "output":{"shape":"AssociateSkillGroupWithRoomResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Associates a skill group with a given room. This enables all skills in the associated skill group on all devices in the room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateSkillWithSkillGroup":{ - "name":"AssociateSkillWithSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateSkillWithSkillGroupRequest"}, - "output":{"shape":"AssociateSkillWithSkillGroupResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"}, - {"shape":"SkillNotLinkedException"} - ], - "documentation":"

Associates a skill with a skill group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "AssociateSkillWithUsers":{ - "name":"AssociateSkillWithUsers", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AssociateSkillWithUsersRequest"}, - "output":{"shape":"AssociateSkillWithUsersResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Makes a private skill available for enrolled users to enable on their devices.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateAddressBook":{ - "name":"CreateAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateAddressBookRequest"}, - "output":{"shape":"CreateAddressBookResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Creates an address book with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateBusinessReportSchedule":{ - "name":"CreateBusinessReportSchedule", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateBusinessReportScheduleRequest"}, - "output":{"shape":"CreateBusinessReportScheduleResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"} - ], - "documentation":"

Creates a recurring schedule for usage reports to deliver to the specified S3 location with a specified daily or weekly interval.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateConferenceProvider":{ - "name":"CreateConferenceProvider", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateConferenceProviderRequest"}, - "output":{"shape":"CreateConferenceProviderResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"} - ], - "documentation":"

Adds a new conference provider under the user's AWS account.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateContact":{ - "name":"CreateContact", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateContactRequest"}, - "output":{"shape":"CreateContactResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Creates a contact with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateGatewayGroup":{ - "name":"CreateGatewayGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateGatewayGroupRequest"}, - "output":{"shape":"CreateGatewayGroupResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Creates a gateway group with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateNetworkProfile":{ - "name":"CreateNetworkProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateNetworkProfileRequest"}, - "output":{"shape":"CreateNetworkProfileResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"InvalidCertificateAuthorityException"}, - {"shape":"InvalidServiceLinkedRoleStateException"} - ], - "documentation":"

Creates a network profile with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateProfile":{ - "name":"CreateProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateProfileRequest"}, - "output":{"shape":"CreateProfileResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"AlreadyExistsException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Creates a new room profile with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateRoom":{ - "name":"CreateRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateRoomRequest"}, - "output":{"shape":"CreateRoomResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Creates a room with the specified details.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateSkillGroup":{ - "name":"CreateSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateSkillGroupRequest"}, - "output":{"shape":"CreateSkillGroupResponse"}, - "errors":[ - {"shape":"AlreadyExistsException"}, - {"shape":"LimitExceededException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Creates a skill group with a specified name and description.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "CreateUser":{ - "name":"CreateUser", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"CreateUserRequest"}, - "output":{"shape":"CreateUserResponse"}, - "errors":[ - {"shape":"ResourceInUseException"}, - {"shape":"LimitExceededException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Creates a user.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteAddressBook":{ - "name":"DeleteAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteAddressBookRequest"}, - "output":{"shape":"DeleteAddressBookResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes an address book by the address book ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteBusinessReportSchedule":{ - "name":"DeleteBusinessReportSchedule", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteBusinessReportScheduleRequest"}, - "output":{"shape":"DeleteBusinessReportScheduleResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes the recurring report delivery schedule with the specified schedule ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteConferenceProvider":{ - "name":"DeleteConferenceProvider", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteConferenceProviderRequest"}, - "output":{"shape":"DeleteConferenceProviderResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Deletes a conference provider.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteContact":{ - "name":"DeleteContact", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteContactRequest"}, - "output":{"shape":"DeleteContactResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes a contact by the contact ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteDevice":{ - "name":"DeleteDevice", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteDeviceRequest"}, - "output":{"shape":"DeleteDeviceResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"InvalidCertificateAuthorityException"} - ], - "documentation":"

Removes a device from Alexa For Business.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteDeviceUsageData":{ - "name":"DeleteDeviceUsageData", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteDeviceUsageDataRequest"}, - "output":{"shape":"DeleteDeviceUsageDataResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"DeviceNotRegisteredException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteGatewayGroup":{ - "name":"DeleteGatewayGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteGatewayGroupRequest"}, - "output":{"shape":"DeleteGatewayGroupResponse"}, - "errors":[ - {"shape":"ResourceAssociatedException"} - ], - "documentation":"

Deletes a gateway group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteNetworkProfile":{ - "name":"DeleteNetworkProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteNetworkProfileRequest"}, - "output":{"shape":"DeleteNetworkProfileResponse"}, - "errors":[ - {"shape":"ResourceInUseException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Deletes a network profile by the network profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteProfile":{ - "name":"DeleteProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteProfileRequest"}, - "output":{"shape":"DeleteProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes a room profile by the profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteRoom":{ - "name":"DeleteRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteRoomRequest"}, - "output":{"shape":"DeleteRoomResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes a room by the room ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteRoomSkillParameter":{ - "name":"DeleteRoomSkillParameter", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteRoomSkillParameterRequest"}, - "output":{"shape":"DeleteRoomSkillParameterResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes room skill parameter details by room, skill, and parameter key ID.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteSkillAuthorization":{ - "name":"DeleteSkillAuthorization", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteSkillAuthorizationRequest"}, - "output":{"shape":"DeleteSkillAuthorizationResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Unlinks a third-party account from a skill.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteSkillGroup":{ - "name":"DeleteSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteSkillGroupRequest"}, - "output":{"shape":"DeleteSkillGroupResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes a skill group by skill group ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DeleteUser":{ - "name":"DeleteUser", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DeleteUserRequest"}, - "output":{"shape":"DeleteUserResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Deletes a specified user by user ARN and enrollment ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DisassociateContactFromAddressBook":{ - "name":"DisassociateContactFromAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DisassociateContactFromAddressBookRequest"}, - "output":{"shape":"DisassociateContactFromAddressBookResponse"}, - "documentation":"

Disassociates a contact from a given address book.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DisassociateDeviceFromRoom":{ - "name":"DisassociateDeviceFromRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DisassociateDeviceFromRoomRequest"}, - "output":{"shape":"DisassociateDeviceFromRoomResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"DeviceNotRegisteredException"} - ], - "documentation":"

Disassociates a device from its current room. The device continues to be connected to the Wi-Fi network and is still registered to the account. The device settings and skills are removed from the room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DisassociateSkillFromSkillGroup":{ - "name":"DisassociateSkillFromSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DisassociateSkillFromSkillGroupRequest"}, - "output":{"shape":"DisassociateSkillFromSkillGroupResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Disassociates a skill from a skill group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DisassociateSkillFromUsers":{ - "name":"DisassociateSkillFromUsers", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DisassociateSkillFromUsersRequest"}, - "output":{"shape":"DisassociateSkillFromUsersResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Makes a private skill unavailable for enrolled users and prevents them from enabling it on their devices.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "DisassociateSkillGroupFromRoom":{ - "name":"DisassociateSkillGroupFromRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DisassociateSkillGroupFromRoomRequest"}, - "output":{"shape":"DisassociateSkillGroupFromRoomResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Disassociates a skill group from a specified room. This disables all skills in the skill group on all devices in the room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ForgetSmartHomeAppliances":{ - "name":"ForgetSmartHomeAppliances", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ForgetSmartHomeAppliancesRequest"}, - "output":{"shape":"ForgetSmartHomeAppliancesResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Forgets smart home appliances associated to a room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetAddressBook":{ - "name":"GetAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetAddressBookRequest"}, - "output":{"shape":"GetAddressBookResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets address the book details by the address book ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetConferencePreference":{ - "name":"GetConferencePreference", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetConferencePreferenceRequest"}, - "output":{"shape":"GetConferencePreferenceResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Retrieves the existing conference preferences.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetConferenceProvider":{ - "name":"GetConferenceProvider", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetConferenceProviderRequest"}, - "output":{"shape":"GetConferenceProviderResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets details about a specific conference provider.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetContact":{ - "name":"GetContact", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetContactRequest"}, - "output":{"shape":"GetContactResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets the contact details by the contact ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetDevice":{ - "name":"GetDevice", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetDeviceRequest"}, - "output":{"shape":"GetDeviceResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets the details of a device by device ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetGateway":{ - "name":"GetGateway", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetGatewayRequest"}, - "output":{"shape":"GetGatewayResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Retrieves the details of a gateway.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetGatewayGroup":{ - "name":"GetGatewayGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetGatewayGroupRequest"}, - "output":{"shape":"GetGatewayGroupResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Retrieves the details of a gateway group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetInvitationConfiguration":{ - "name":"GetInvitationConfiguration", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetInvitationConfigurationRequest"}, - "output":{"shape":"GetInvitationConfigurationResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Retrieves the configured values for the user enrollment invitation email template.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetNetworkProfile":{ - "name":"GetNetworkProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetNetworkProfileRequest"}, - "output":{"shape":"GetNetworkProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"InvalidSecretsManagerResourceException"} - ], - "documentation":"

Gets the network profile details by the network profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetProfile":{ - "name":"GetProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetProfileRequest"}, - "output":{"shape":"GetProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets the details of a room profile by profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetRoom":{ - "name":"GetRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetRoomRequest"}, - "output":{"shape":"GetRoomResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets room details by room ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetRoomSkillParameter":{ - "name":"GetRoomSkillParameter", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetRoomSkillParameterRequest"}, - "output":{"shape":"GetRoomSkillParameterResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets room skill parameter details by room, skill, and parameter key ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "GetSkillGroup":{ - "name":"GetSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetSkillGroupRequest"}, - "output":{"shape":"GetSkillGroupResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Gets skill group details by skill group ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListBusinessReportSchedules":{ - "name":"ListBusinessReportSchedules", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListBusinessReportSchedulesRequest"}, - "output":{"shape":"ListBusinessReportSchedulesResponse"}, - "documentation":"

Lists the details of the schedules that a user configured. A download URL of the report associated with each schedule is returned every time this action is called. A new download URL is returned each time, and is valid for 24 hours.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListConferenceProviders":{ - "name":"ListConferenceProviders", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListConferenceProvidersRequest"}, - "output":{"shape":"ListConferenceProvidersResponse"}, - "documentation":"

Lists conference providers under a specific AWS account.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListDeviceEvents":{ - "name":"ListDeviceEvents", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListDeviceEventsRequest"}, - "output":{"shape":"ListDeviceEventsResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Lists the device event history, including device connection status, for up to 30 days.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListGatewayGroups":{ - "name":"ListGatewayGroups", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListGatewayGroupsRequest"}, - "output":{"shape":"ListGatewayGroupsResponse"}, - "documentation":"

Retrieves a list of gateway group summaries. Use GetGatewayGroup to retrieve details of a specific gateway group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListGateways":{ - "name":"ListGateways", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListGatewaysRequest"}, - "output":{"shape":"ListGatewaysResponse"}, - "documentation":"

Retrieves a list of gateway summaries. Use GetGateway to retrieve details of a specific gateway. An optional gateway group ARN can be provided to only retrieve gateway summaries of gateways that are associated with that gateway group ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListSkills":{ - "name":"ListSkills", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListSkillsRequest"}, - "output":{"shape":"ListSkillsResponse"}, - "documentation":"

Lists all enabled skills in a specific skill group.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListSkillsStoreCategories":{ - "name":"ListSkillsStoreCategories", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListSkillsStoreCategoriesRequest"}, - "output":{"shape":"ListSkillsStoreCategoriesResponse"}, - "documentation":"

Lists all categories in the Alexa skill store.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListSkillsStoreSkillsByCategory":{ - "name":"ListSkillsStoreSkillsByCategory", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListSkillsStoreSkillsByCategoryRequest"}, - "output":{"shape":"ListSkillsStoreSkillsByCategoryResponse"}, - "documentation":"

Lists all skills in the Alexa skill store by category.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListSmartHomeAppliances":{ - "name":"ListSmartHomeAppliances", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListSmartHomeAppliancesRequest"}, - "output":{"shape":"ListSmartHomeAppliancesResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Lists all of the smart home appliances associated with a room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ListTags":{ - "name":"ListTags", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListTagsRequest"}, - "output":{"shape":"ListTagsResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Lists all tags for the specified resource.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "PutConferencePreference":{ - "name":"PutConferencePreference", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"PutConferencePreferenceRequest"}, - "output":{"shape":"PutConferencePreferenceResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Sets the conference preferences on a specific conference provider at the account level.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "PutInvitationConfiguration":{ - "name":"PutInvitationConfiguration", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"PutInvitationConfigurationRequest"}, - "output":{"shape":"PutInvitationConfigurationResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Configures the email template for the user enrollment invitation with the specified attributes.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "PutRoomSkillParameter":{ - "name":"PutRoomSkillParameter", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"PutRoomSkillParameterRequest"}, - "output":{"shape":"PutRoomSkillParameterResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates room skill parameter details by room, skill, and parameter key ID. Not all skills have a room skill parameter.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "PutSkillAuthorization":{ - "name":"PutSkillAuthorization", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"PutSkillAuthorizationRequest"}, - "output":{"shape":"PutSkillAuthorizationResponse"}, - "errors":[ - {"shape":"UnauthorizedException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Links a user's account to a third-party skill provider. If this API operation is called by an assumed IAM role, the skill being linked must be a private skill. Also, the skill must be owned by the AWS account that assumed the IAM role.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "RegisterAVSDevice":{ - "name":"RegisterAVSDevice", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"RegisterAVSDeviceRequest"}, - "output":{"shape":"RegisterAVSDeviceResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"}, - {"shape":"InvalidDeviceException"} - ], - "documentation":"

Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using Alexa Voice Service (AVS).

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "RejectSkill":{ - "name":"RejectSkill", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"RejectSkillRequest"}, - "output":{"shape":"RejectSkillResponse"}, - "errors":[ - {"shape":"ConcurrentModificationException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Disassociates a skill from the organization under a user's AWS account. If the skill is a private skill, it moves to an AcceptStatus of PENDING. Any private or public skill that is rejected can be added later by calling the ApproveSkill API.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "ResolveRoom":{ - "name":"ResolveRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ResolveRoomRequest"}, - "output":{"shape":"ResolveRoomResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.

To query ResolveRoom from an Alexa skill, the skill ID needs to be authorized. When the skill is using an AWS Lambda function, the skill is automatically authorized when you publish your skill as a private skill to your AWS account. Skills that are hosted using a custom web service must be manually authorized. To get your skill authorized, contact AWS Support with your AWS account ID that queries the ResolveRoom API and skill ID.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "RevokeInvitation":{ - "name":"RevokeInvitation", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"RevokeInvitationRequest"}, - "output":{"shape":"RevokeInvitationResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Revokes an invitation and invalidates the enrollment URL.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchAddressBooks":{ - "name":"SearchAddressBooks", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchAddressBooksRequest"}, - "output":{"shape":"SearchAddressBooksResponse"}, - "documentation":"

Searches address books and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchContacts":{ - "name":"SearchContacts", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchContactsRequest"}, - "output":{"shape":"SearchContactsResponse"}, - "documentation":"

Searches contacts and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchDevices":{ - "name":"SearchDevices", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchDevicesRequest"}, - "output":{"shape":"SearchDevicesResponse"}, - "documentation":"

Searches devices and lists the ones that meet a set of filter criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchNetworkProfiles":{ - "name":"SearchNetworkProfiles", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchNetworkProfilesRequest"}, - "output":{"shape":"SearchNetworkProfilesResponse"}, - "documentation":"

Searches network profiles and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchProfiles":{ - "name":"SearchProfiles", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchProfilesRequest"}, - "output":{"shape":"SearchProfilesResponse"}, - "documentation":"

Searches room profiles and lists the ones that meet a set of filter criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchRooms":{ - "name":"SearchRooms", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchRoomsRequest"}, - "output":{"shape":"SearchRoomsResponse"}, - "documentation":"

Searches rooms and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchSkillGroups":{ - "name":"SearchSkillGroups", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchSkillGroupsRequest"}, - "output":{"shape":"SearchSkillGroupsResponse"}, - "documentation":"

Searches skill groups and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SearchUsers":{ - "name":"SearchUsers", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SearchUsersRequest"}, - "output":{"shape":"SearchUsersResponse"}, - "documentation":"

Searches users and lists the ones that meet a set of filter and sort criteria.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "SendAnnouncement":{ - "name":"SendAnnouncement", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SendAnnouncementRequest"}, - "output":{"shape":"SendAnnouncementResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"AlreadyExistsException"} - ], - "documentation":"

Triggers an asynchronous flow to send text, SSML, or audio announcements to rooms that are identified by a search or filter.

" - }, - "SendInvitation":{ - "name":"SendInvitation", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"SendInvitationRequest"}, - "output":{"shape":"SendInvitationResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"InvalidUserStatusException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or until you call this operation again, whichever comes first.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "StartDeviceSync":{ - "name":"StartDeviceSync", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"StartDeviceSyncRequest"}, - "output":{"shape":"StartDeviceSyncResponse"}, - "errors":[ - {"shape":"DeviceNotRegisteredException"} - ], - "documentation":"

Resets a device and its account to the known default settings. This clears all information and settings set by previous users in the following ways:

  • Bluetooth - This unpairs all bluetooth devices paired with your echo device.

  • Volume - This resets the echo device's volume to the default value.

  • Notifications - This clears all notifications from your echo device.

  • Lists - This clears all to-do items from your echo device.

  • Settings - This internally syncs the room's profile (if the device is assigned to a room), contacts, address books, delegation access for account linking, and communications (if enabled on the room profile).

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "StartSmartHomeApplianceDiscovery":{ - "name":"StartSmartHomeApplianceDiscovery", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"StartSmartHomeApplianceDiscoveryRequest"}, - "output":{"shape":"StartSmartHomeApplianceDiscoveryResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Initiates the discovery of any smart home appliances associated with the room.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"TagResourceRequest"}, - "output":{"shape":"TagResourceResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Adds metadata tags to a specified resource.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UntagResourceRequest"}, - "output":{"shape":"UntagResourceResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Removes metadata tags from a specified resource.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateAddressBook":{ - "name":"UpdateAddressBook", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateAddressBookRequest"}, - "output":{"shape":"UpdateAddressBookResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates address book details by the address book ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateBusinessReportSchedule":{ - "name":"UpdateBusinessReportSchedule", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateBusinessReportScheduleRequest"}, - "output":{"shape":"UpdateBusinessReportScheduleResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates the configuration of the report delivery schedule with the specified schedule ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateConferenceProvider":{ - "name":"UpdateConferenceProvider", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateConferenceProviderRequest"}, - "output":{"shape":"UpdateConferenceProviderResponse"}, - "errors":[ - {"shape":"NotFoundException"} - ], - "documentation":"

Updates an existing conference provider's settings.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateContact":{ - "name":"UpdateContact", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateContactRequest"}, - "output":{"shape":"UpdateContactResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates the contact details by the contact ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateDevice":{ - "name":"UpdateDevice", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateDeviceRequest"}, - "output":{"shape":"UpdateDeviceResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"DeviceNotRegisteredException"} - ], - "documentation":"

Updates the device name by device ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateGateway":{ - "name":"UpdateGateway", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateGatewayRequest"}, - "output":{"shape":"UpdateGatewayResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"} - ], - "documentation":"

Updates the details of a gateway. If any optional field is not provided, the existing corresponding value is left unmodified.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateGatewayGroup":{ - "name":"UpdateGatewayGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateGatewayGroupRequest"}, - "output":{"shape":"UpdateGatewayGroupResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"} - ], - "documentation":"

Updates the details of a gateway group. If any optional field is not provided, the existing corresponding value is left unmodified.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateNetworkProfile":{ - "name":"UpdateNetworkProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateNetworkProfileRequest"}, - "output":{"shape":"UpdateNetworkProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"InvalidCertificateAuthorityException"}, - {"shape":"InvalidSecretsManagerResourceException"} - ], - "documentation":"

Updates a network profile by the network profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateProfile":{ - "name":"UpdateProfile", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateProfileRequest"}, - "output":{"shape":"UpdateProfileResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates an existing room profile by room profile ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateRoom":{ - "name":"UpdateRoom", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateRoomRequest"}, - "output":{"shape":"UpdateRoomResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"} - ], - "documentation":"

Updates room details by room ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - }, - "UpdateSkillGroup":{ - "name":"UpdateSkillGroup", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateSkillGroupRequest"}, - "output":{"shape":"UpdateSkillGroupResponse"}, - "errors":[ - {"shape":"NotFoundException"}, - {"shape":"NameInUseException"}, - {"shape":"ConcurrentModificationException"} - ], - "documentation":"

Updates skill group details by skill group ARN.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" - } - }, - "shapes":{ - "Address":{ - "type":"string", - "max":500, - "min":1 - }, - "AddressBook":{ - "type":"structure", - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book.

" - }, - "Name":{ - "shape":"AddressBookName", - "documentation":"

The name of the address book.

" - }, - "Description":{ - "shape":"AddressBookDescription", - "documentation":"

The description of the address book.

" - } - }, - "documentation":"

An address book with attributes.

" - }, - "AddressBookData":{ - "type":"structure", - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book.

" - }, - "Name":{ - "shape":"AddressBookName", - "documentation":"

The name of the address book.

" - }, - "Description":{ - "shape":"AddressBookDescription", - "documentation":"

The description of the address book.

" - } - }, - "documentation":"

Information related to an address book.

" - }, - "AddressBookDataList":{ - "type":"list", - "member":{"shape":"AddressBookData"} - }, - "AddressBookDescription":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "AddressBookName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "AlreadyExistsException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The resource being created already exists.

", - "exception":true - }, - "AmazonId":{ - "type":"string", - "pattern":"[a-zA-Z0-9]{1,18}" - }, - "ApplianceDescription":{"type":"string"}, - "ApplianceFriendlyName":{"type":"string"}, - "ApplianceManufacturerName":{"type":"string"}, - "ApproveSkillRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The unique identifier of the skill.

" - } - } - }, - "ApproveSkillResponse":{ - "type":"structure", - "members":{ - } - }, - "Arn":{ - "type":"string", - "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" - }, - "AssociateContactWithAddressBookRequest":{ - "type":"structure", - "required":[ - "ContactArn", - "AddressBookArn" - ], - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact to associate with an address book.

" - }, - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book with which to associate the contact.

" - } - } - }, - "AssociateContactWithAddressBookResponse":{ - "type":"structure", - "members":{ - } - }, - "AssociateDeviceWithNetworkProfileRequest":{ - "type":"structure", - "required":[ - "DeviceArn", - "NetworkProfileArn" - ], - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The device ARN.

" - }, - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile to associate with a device.

" - } - } - }, - "AssociateDeviceWithNetworkProfileResponse":{ - "type":"structure", - "members":{ - } - }, - "AssociateDeviceWithRoomRequest":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device to associate to a room. Required.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room with which to associate the device. Required.

" - } - } - }, - "AssociateDeviceWithRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "AssociateSkillGroupWithRoomRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group to associate with a room. Required.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room with which to associate the skill group. Required.

" - } - } - }, - "AssociateSkillGroupWithRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "AssociateSkillWithSkillGroupRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group to associate the skill to. Required.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The unique identifier of the skill.

" - } - } - }, - "AssociateSkillWithSkillGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "AssociateSkillWithUsersRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The private skill ID you want to make available to enrolled users.

" - } - } - }, - "AssociateSkillWithUsersResponse":{ - "type":"structure", - "members":{ - } - }, - "Audio":{ - "type":"structure", - "required":[ - "Locale", - "Location" - ], - "members":{ - "Locale":{ - "shape":"Locale", - "documentation":"

The locale of the audio message. Currently, en-US is supported.

" - }, - "Location":{ - "shape":"AudioLocation", - "documentation":"

The location of the audio file. Currently, S3 URLs are supported. Only S3 locations comprised of safe characters are valid. For more information, see Safe Characters.

" - } - }, - "documentation":"

The audio message. There is a 1 MB limit on the audio file input and the only supported format is MP3. To convert your MP3 audio files to an Alexa-friendly,

required codec version (MPEG version 2) and bit rate (48 kbps), you might use converter software. One option for this is a command-line tool, FFmpeg. For more information, see FFmpeg. The following command converts the provided <input-file> to an MP3 file that is played in the announcement:

ffmpeg -i <input-file> -ac 2 -codec:a libmp3lame -b:a 48k -ar 16000 <output-file.mp3>

" - }, - "AudioList":{ - "type":"list", - "member":{"shape":"Audio"}, - "max":1 - }, - "AudioLocation":{ - "type":"string", - "max":1200, - "min":0, - "pattern":"https://([A-Za-z0-9_.-]+)?(s3-[A-Za-z0-9-]+|s3\\.([A-Za-z0-9-])+|s3|s3.dualstack\\.([A-Za-z0-9-])+)+.amazonaws.com/.*" - }, - "AuthorizationResult":{ - "type":"map", - "key":{"shape":"Key"}, - "value":{"shape":"Value"}, - "sensitive":true - }, - "Boolean":{"type":"boolean"}, - "BulletPoint":{"type":"string"}, - "BulletPoints":{ - "type":"list", - "member":{"shape":"BulletPoint"} - }, - "BusinessReport":{ - "type":"structure", - "members":{ - "Status":{ - "shape":"BusinessReportStatus", - "documentation":"

The status of the report generation execution (RUNNING, SUCCEEDED, or FAILED).

" - }, - "FailureCode":{ - "shape":"BusinessReportFailureCode", - "documentation":"

The failure code.

" - }, - "S3Location":{ - "shape":"BusinessReportS3Location", - "documentation":"

The S3 location of the output reports.

" - }, - "DeliveryTime":{ - "shape":"BusinessReportDeliveryTime", - "documentation":"

The time of report delivery.

" - }, - "DownloadUrl":{ - "shape":"BusinessReportDownloadUrl", - "documentation":"

The download link where a user can download the report.

" - } - }, - "documentation":"

Usage report with specified parameters.

" - }, - "BusinessReportContentRange":{ - "type":"structure", - "required":["Interval"], - "members":{ - "Interval":{ - "shape":"BusinessReportInterval", - "documentation":"

The interval of the content range.

" - } - }, - "documentation":"

The content range of the report.

" - }, - "BusinessReportDeliveryTime":{"type":"timestamp"}, - "BusinessReportDownloadUrl":{"type":"string"}, - "BusinessReportFailureCode":{ - "type":"string", - "enum":[ - "ACCESS_DENIED", - "NO_SUCH_BUCKET", - "INTERNAL_FAILURE" - ] - }, - "BusinessReportFormat":{ - "type":"string", - "enum":[ - "CSV", - "CSV_ZIP" - ] - }, - "BusinessReportInterval":{ - "type":"string", - "enum":[ - "ONE_DAY", - "ONE_WEEK", - "THIRTY_DAYS" - ] - }, - "BusinessReportRecurrence":{ - "type":"structure", - "members":{ - "StartDate":{ - "shape":"Date", - "documentation":"

The start date.

" - } - }, - "documentation":"

The recurrence of the reports.

" - }, - "BusinessReportS3Location":{ - "type":"structure", - "members":{ - "Path":{ - "shape":"BusinessReportS3Path", - "documentation":"

The path of the business report.

" - }, - "BucketName":{ - "shape":"CustomerS3BucketName", - "documentation":"

The S3 bucket name of the output reports.

" - } - }, - "documentation":"

The S3 location of the output reports.

" - }, - "BusinessReportS3Path":{"type":"string"}, - "BusinessReportSchedule":{ - "type":"structure", - "members":{ - "ScheduleArn":{ - "shape":"Arn", - "documentation":"

The ARN of the business report schedule.

" - }, - "ScheduleName":{ - "shape":"BusinessReportScheduleName", - "documentation":"

The name identifier of the schedule.

" - }, - "S3BucketName":{ - "shape":"CustomerS3BucketName", - "documentation":"

The S3 bucket name of the output reports.

" - }, - "S3KeyPrefix":{ - "shape":"S3KeyPrefix", - "documentation":"

The S3 key where the report is delivered.

" - }, - "Format":{ - "shape":"BusinessReportFormat", - "documentation":"

The format of the generated report (individual CSV files or zipped files of individual files).

" - }, - "ContentRange":{ - "shape":"BusinessReportContentRange", - "documentation":"

The content range of the reports.

" - }, - "Recurrence":{ - "shape":"BusinessReportRecurrence", - "documentation":"

The recurrence of the reports.

" - }, - "LastBusinessReport":{ - "shape":"BusinessReport", - "documentation":"

The details of the last business report delivery for a specified time interval.

" - } - }, - "documentation":"

The schedule of the usage report.

" - }, - "BusinessReportScheduleList":{ - "type":"list", - "member":{"shape":"BusinessReportSchedule"} - }, - "BusinessReportScheduleName":{ - "type":"string", - "max":64, - "min":0, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "BusinessReportStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "SUCCEEDED", - "FAILED" - ] - }, - "Category":{ - "type":"structure", - "members":{ - "CategoryId":{ - "shape":"CategoryId", - "documentation":"

The ID of the skill store category.

" - }, - "CategoryName":{ - "shape":"CategoryName", - "documentation":"

The name of the skill store category.

" - } - }, - "documentation":"

The skill store category that is shown. Alexa skills are assigned a specific skill category during creation, such as News, Social, and Sports.

" - }, - "CategoryId":{ - "type":"long", - "min":1 - }, - "CategoryList":{ - "type":"list", - "member":{"shape":"Category"} - }, - "CategoryName":{"type":"string"}, - "CertificateTime":{"type":"timestamp"}, - "ClientId":{ - "type":"string", - "pattern":"^\\S+{1,256}$" - }, - "ClientRequestToken":{ - "type":"string", - "documentation":"

A unique, user-specified identifier for the request that ensures idempotency.

", - "max":150, - "min":10, - "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" - }, - "CommsProtocol":{ - "type":"string", - "enum":[ - "SIP", - "SIPS", - "H323" - ] - }, - "ConcurrentModificationException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

There is a concurrent modification of resources.

", - "exception":true - }, - "ConferencePreference":{ - "type":"structure", - "members":{ - "DefaultConferenceProviderArn":{ - "shape":"Arn", - "documentation":"

The ARN of the default conference provider.

" - } - }, - "documentation":"

The default conference provider that is used if no other scheduled meetings are detected.

" - }, - "ConferenceProvider":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created conference provider.

" - }, - "Name":{ - "shape":"ConferenceProviderName", - "documentation":"

The name of the conference provider.

" - }, - "Type":{ - "shape":"ConferenceProviderType", - "documentation":"

The type of conference providers.

" - }, - "IPDialIn":{ - "shape":"IPDialIn", - "documentation":"

The IP endpoint and protocol for calling.

" - }, - "PSTNDialIn":{ - "shape":"PSTNDialIn", - "documentation":"

The information for PSTN conferencing.

" - }, - "MeetingSetting":{ - "shape":"MeetingSetting", - "documentation":"

The meeting settings for the conference provider.

" - } - }, - "documentation":"

An entity that provides a conferencing solution. Alexa for Business acts as the voice interface and mediator that connects users to their preferred conference provider. Examples of conference providers include Amazon Chime, Zoom, Cisco, and Polycom.

" - }, - "ConferenceProviderName":{ - "type":"string", - "max":50, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "ConferenceProviderType":{ - "type":"string", - "enum":[ - "CHIME", - "BLUEJEANS", - "FUZE", - "GOOGLE_HANGOUTS", - "POLYCOM", - "RINGCENTRAL", - "SKYPE_FOR_BUSINESS", - "WEBEX", - "ZOOM", - "CUSTOM" - ] - }, - "ConferenceProvidersList":{ - "type":"list", - "member":{"shape":"ConferenceProvider"} - }, - "ConnectionStatus":{ - "type":"string", - "enum":[ - "ONLINE", - "OFFLINE" - ] - }, - "ConnectionStatusUpdatedTime":{"type":"timestamp"}, - "Contact":{ - "type":"structure", - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact.

" - }, - "DisplayName":{ - "shape":"ContactName", - "documentation":"

The name of the contact to display on the console.

" - }, - "FirstName":{ - "shape":"ContactName", - "documentation":"

The first name of the contact, used to call the contact on the device.

" - }, - "LastName":{ - "shape":"ContactName", - "documentation":"

The last name of the contact, used to call the contact on the device.

" - }, - "PhoneNumber":{ - "shape":"RawPhoneNumber", - "documentation":"

The phone number of the contact. The phone number type defaults to WORK. You can either specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

" - }, - "PhoneNumbers":{ - "shape":"PhoneNumberList", - "documentation":"

The list of phone numbers for the contact.

" - }, - "SipAddresses":{ - "shape":"SipAddressList", - "documentation":"

The list of SIP addresses for the contact.

" - } - }, - "documentation":"

A contact with attributes.

" - }, - "ContactData":{ - "type":"structure", - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact.

" - }, - "DisplayName":{ - "shape":"ContactName", - "documentation":"

The name of the contact to display on the console.

" - }, - "FirstName":{ - "shape":"ContactName", - "documentation":"

The first name of the contact, used to call the contact on the device.

" - }, - "LastName":{ - "shape":"ContactName", - "documentation":"

The last name of the contact, used to call the contact on the device.

" - }, - "PhoneNumber":{ - "shape":"RawPhoneNumber", - "documentation":"

The phone number of the contact. The phone number type defaults to WORK. You can specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

" - }, - "PhoneNumbers":{ - "shape":"PhoneNumberList", - "documentation":"

The list of phone numbers for the contact.

" - }, - "SipAddresses":{ - "shape":"SipAddressList", - "documentation":"

The list of SIP addresses for the contact.

" - } - }, - "documentation":"

Information related to a contact.

" - }, - "ContactDataList":{ - "type":"list", - "member":{"shape":"ContactData"} - }, - "ContactName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "Content":{ - "type":"structure", - "members":{ - "TextList":{ - "shape":"TextList", - "documentation":"

The list of text messages.

" - }, - "SsmlList":{ - "shape":"SsmlList", - "documentation":"

The list of SSML messages.

" - }, - "AudioList":{ - "shape":"AudioList", - "documentation":"

The list of audio messages.

" - } - }, - "documentation":"

The content definition. This can contain only one text, SSML, or audio list object.

" - }, - "CountryCode":{ - "type":"string", - "pattern":"\\d{1,3}" - }, - "CreateAddressBookRequest":{ - "type":"structure", - "required":["Name"], - "members":{ - "Name":{ - "shape":"AddressBookName", - "documentation":"

The name of the address book.

" - }, - "Description":{ - "shape":"AddressBookDescription", - "documentation":"

The description of the address book.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for the request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "CreateAddressBookResponse":{ - "type":"structure", - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created address book.

" - } - } - }, - "CreateBusinessReportScheduleRequest":{ - "type":"structure", - "required":[ - "Format", - "ContentRange" - ], - "members":{ - "ScheduleName":{ - "shape":"BusinessReportScheduleName", - "documentation":"

The name identifier of the schedule.

" - }, - "S3BucketName":{ - "shape":"CustomerS3BucketName", - "documentation":"

The S3 bucket name of the output reports. If this isn't specified, the report can be retrieved from a download link by calling ListBusinessReportSchedule.

" - }, - "S3KeyPrefix":{ - "shape":"S3KeyPrefix", - "documentation":"

The S3 key where the report is delivered.

" - }, - "Format":{ - "shape":"BusinessReportFormat", - "documentation":"

The format of the generated report (individual CSV files or zipped files of individual files).

" - }, - "ContentRange":{ - "shape":"BusinessReportContentRange", - "documentation":"

The content range of the reports.

" - }, - "Recurrence":{ - "shape":"BusinessReportRecurrence", - "documentation":"

The recurrence of the reports. If this isn't specified, the report will only be delivered one time when the API is called.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The client request token.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags for the business report schedule.

" - } - } - }, - "CreateBusinessReportScheduleResponse":{ - "type":"structure", - "members":{ - "ScheduleArn":{ - "shape":"Arn", - "documentation":"

The ARN of the business report schedule.

" - } - } - }, - "CreateConferenceProviderRequest":{ - "type":"structure", - "required":[ - "ConferenceProviderName", - "ConferenceProviderType", - "MeetingSetting" - ], - "members":{ - "ConferenceProviderName":{ - "shape":"ConferenceProviderName", - "documentation":"

The name of the conference provider.

" - }, - "ConferenceProviderType":{ - "shape":"ConferenceProviderType", - "documentation":"

Represents a type within a list of predefined types.

" - }, - "IPDialIn":{ - "shape":"IPDialIn", - "documentation":"

The IP endpoint and protocol for calling.

" - }, - "PSTNDialIn":{ - "shape":"PSTNDialIn", - "documentation":"

The information for PSTN conferencing.

" - }, - "MeetingSetting":{ - "shape":"MeetingSetting", - "documentation":"

The meeting settings for the conference provider.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token of the client.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "CreateConferenceProviderResponse":{ - "type":"structure", - "members":{ - "ConferenceProviderArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly-created conference provider.

" - } - } - }, - "CreateContactRequest":{ - "type":"structure", - "required":["FirstName"], - "members":{ - "DisplayName":{ - "shape":"ContactName", - "documentation":"

The name of the contact to display on the console.

" - }, - "FirstName":{ - "shape":"ContactName", - "documentation":"

The first name of the contact that is used to call the contact on the device.

" - }, - "LastName":{ - "shape":"ContactName", - "documentation":"

The last name of the contact that is used to call the contact on the device.

" - }, - "PhoneNumber":{ - "shape":"RawPhoneNumber", - "documentation":"

The phone number of the contact in E.164 format. The phone number type defaults to WORK. You can specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

" - }, - "PhoneNumbers":{ - "shape":"PhoneNumberList", - "documentation":"

The list of phone numbers for the contact.

" - }, - "SipAddresses":{ - "shape":"SipAddressList", - "documentation":"

The list of SIP addresses for the contact.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "CreateContactResponse":{ - "type":"structure", - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created address book.

" - } - } - }, - "CreateEndOfMeetingReminder":{ - "type":"structure", - "required":[ - "ReminderAtMinutes", - "ReminderType", - "Enabled" - ], - "members":{ - "ReminderAtMinutes":{ - "shape":"EndOfMeetingReminderMinutesList", - "documentation":"

A range of 3 to 15 minutes that determines when the reminder begins.

" - }, - "ReminderType":{ - "shape":"EndOfMeetingReminderType", - "documentation":"

The type of sound that users hear during the end of meeting reminder.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether an end of meeting reminder is enabled or not.

" - } - }, - "documentation":"

Creates settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "CreateGatewayGroupRequest":{ - "type":"structure", - "required":[ - "Name", - "ClientRequestToken" - ], - "members":{ - "Name":{ - "shape":"GatewayGroupName", - "documentation":"

The name of the gateway group.

" - }, - "Description":{ - "shape":"GatewayGroupDescription", - "documentation":"

The description of the gateway group.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for the request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "CreateGatewayGroupResponse":{ - "type":"structure", - "members":{ - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the created gateway group.

" - } - } - }, - "CreateInstantBooking":{ - "type":"structure", - "required":[ - "DurationInMinutes", - "Enabled" - ], - "members":{ - "DurationInMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 15 and 240 minutes at increments of 15 that determines how long to book an available room when a meeting is started with Alexa.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether instant booking is enabled or not.

" - } - }, - "documentation":"

Creates settings for the instant booking feature that are applied to a room profile. When users start their meeting with Alexa, Alexa automatically books the room for the configured duration if the room is available.

" - }, - "CreateMeetingRoomConfiguration":{ - "type":"structure", - "members":{ - "RoomUtilizationMetricsEnabled":{ - "shape":"Boolean", - "documentation":"

Whether room utilization metrics are enabled or not.

" - }, - "EndOfMeetingReminder":{"shape":"CreateEndOfMeetingReminder"}, - "InstantBooking":{ - "shape":"CreateInstantBooking", - "documentation":"

Settings to automatically book a room for a configured duration if it's free when joining a meeting with Alexa.

" - }, - "RequireCheckIn":{ - "shape":"CreateRequireCheckIn", - "documentation":"

Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into to make the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

" - }, - "ProactiveJoin":{"shape":"CreateProactiveJoin"} - }, - "documentation":"

Creates meeting room settings of a room profile.

" - }, - "CreateNetworkProfileRequest":{ - "type":"structure", - "required":[ - "NetworkProfileName", - "Ssid", - "SecurityType", - "ClientRequestToken" - ], - "members":{ - "NetworkProfileName":{ - "shape":"NetworkProfileName", - "documentation":"

The name of the network profile associated with a device.

" - }, - "Description":{ - "shape":"NetworkProfileDescription", - "documentation":"

Detailed information about a device's network profile.

" - }, - "Ssid":{ - "shape":"NetworkSsid", - "documentation":"

The SSID of the Wi-Fi network.

" - }, - "SecurityType":{ - "shape":"NetworkSecurityType", - "documentation":"

The security type of the Wi-Fi network. This can be WPA2_ENTERPRISE, WPA2_PSK, WPA_PSK, WEP, or OPEN.

" - }, - "EapMethod":{ - "shape":"NetworkEapMethod", - "documentation":"

The authentication standard that is used in the EAP framework. Currently, EAP_TLS is supported.

" - }, - "CurrentPassword":{ - "shape":"CurrentWiFiPassword", - "documentation":"

The current password of the Wi-Fi network.

" - }, - "NextPassword":{ - "shape":"NextWiFiPassword", - "documentation":"

The next, or subsequent, password of the Wi-Fi network. This password is asynchronously transmitted to the device and is used when the password of the network changes to NextPassword.

" - }, - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

" - }, - "TrustAnchors":{ - "shape":"TrustAnchorList", - "documentation":"

The root certificates of your authentication server that is installed on your devices and used to trust your authentication server during EAP negotiation.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "CreateNetworkProfileResponse":{ - "type":"structure", - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - } - } - }, - "CreateProactiveJoin":{ - "type":"structure", - "required":["EnabledByMotion"], - "members":{ - "EnabledByMotion":{"shape":"Boolean"} - } - }, - "CreateProfileRequest":{ - "type":"structure", - "required":[ - "ProfileName", - "Timezone", - "Address", - "DistanceUnit", - "TemperatureUnit", - "WakeWord" - ], - "members":{ - "ProfileName":{ - "shape":"ProfileName", - "documentation":"

The name of a room profile.

" - }, - "Timezone":{ - "shape":"Timezone", - "documentation":"

The time zone used by a room profile.

" - }, - "Address":{ - "shape":"Address", - "documentation":"

The valid address for the room.

" - }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The distance unit to be used by devices in the profile.

" - }, - "TemperatureUnit":{ - "shape":"TemperatureUnit", - "documentation":"

The temperature unit to be used by devices in the profile.

" - }, - "WakeWord":{ - "shape":"WakeWord", - "documentation":"

A wake word for Alexa, Echo, Amazon, or a computer.

" - }, - "Locale":{ - "shape":"DeviceLocale", - "documentation":"

The locale of the room profile. (This is currently only available to a limited preview audience.)

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The user-specified token that is used during the creation of a profile.

", - "idempotencyToken":true - }, - "SetupModeDisabled":{ - "shape":"Boolean", - "documentation":"

Whether room profile setup is enabled.

" - }, - "MaxVolumeLimit":{ - "shape":"MaxVolumeLimit", - "documentation":"

The maximum volume limit for a room profile.

" - }, - "PSTNEnabled":{ - "shape":"Boolean", - "documentation":"

Whether PSTN calling is enabled.

" - }, - "DataRetentionOptIn":{ - "shape":"Boolean", - "documentation":"

Whether data retention of the profile is enabled.

" - }, - "MeetingRoomConfiguration":{ - "shape":"CreateMeetingRoomConfiguration", - "documentation":"

The meeting room settings of a room profile.

" - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags for the profile.

" - } - } - }, - "CreateProfileResponse":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created room profile in the response.

" - } - } - }, - "CreateRequireCheckIn":{ - "type":"structure", - "required":[ - "ReleaseAfterMinutes", - "Enabled" - ], - "members":{ - "ReleaseAfterMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 5 and 20 minutes to determine when to release the room if it's not checked into.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether require check in is enabled or not.

" - } - }, - "documentation":"

Creates settings for the require check in feature that are applied to a room profile. Require check in allows a meeting room’s Alexa or AVS device to prompt the user to check in; otherwise, the room will be released.

" - }, - "CreateRoomRequest":{ - "type":"structure", - "required":["RoomName"], - "members":{ - "RoomName":{ - "shape":"RoomName", - "documentation":"

The name for the room.

" - }, - "Description":{ - "shape":"RoomDescription", - "documentation":"

The description for the room.

" - }, - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The profile ARN for the room. This is required.

" - }, - "ProviderCalendarId":{ - "shape":"ProviderCalendarId", - "documentation":"

The calendar ARN for the room.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags for the room.

" - } - } - }, - "CreateRoomResponse":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created room in the response.

" - } - } - }, - "CreateSkillGroupRequest":{ - "type":"structure", - "required":["SkillGroupName"], - "members":{ - "SkillGroupName":{ - "shape":"SkillGroupName", - "documentation":"

The name for the skill group.

" - }, - "Description":{ - "shape":"SkillGroupDescription", - "documentation":"

The description for the skill group.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags for the skill group.

" - } - } - }, - "CreateSkillGroupResponse":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created skill group in the response.

" - } - } - }, - "CreateUserRequest":{ - "type":"structure", - "required":["UserId"], - "members":{ - "UserId":{ - "shape":"user_UserId", - "documentation":"

The ARN for the user.

" - }, - "FirstName":{ - "shape":"user_FirstName", - "documentation":"

The first name for the user.

" - }, - "LastName":{ - "shape":"user_LastName", - "documentation":"

The last name for the user.

" - }, - "Email":{ - "shape":"Email", - "documentation":"

The email address for the user.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", - "idempotencyToken":true - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags for the user.

" - } - } - }, - "CreateUserResponse":{ - "type":"structure", - "members":{ - "UserArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created user in the response.

" - } - } - }, - "CurrentWiFiPassword":{ - "type":"string", - "max":128, - "min":5, - "pattern":"[\\x00-\\x7F]*", - "sensitive":true - }, - "CustomerS3BucketName":{ - "type":"string", - "pattern":"[a-z0-9-\\.]{3,63}" - }, - "Date":{ - "type":"string", - "pattern":"^\\d{4}\\-(0?[1-9]|1[012])\\-(0?[1-9]|[12][0-9]|3[01])$" - }, - "DeleteAddressBookRequest":{ - "type":"structure", - "required":["AddressBookArn"], - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book to delete.

" - } - } - }, - "DeleteAddressBookResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteBusinessReportScheduleRequest":{ - "type":"structure", - "required":["ScheduleArn"], - "members":{ - "ScheduleArn":{ - "shape":"Arn", - "documentation":"

The ARN of the business report schedule.

" - } - } - }, - "DeleteBusinessReportScheduleResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteConferenceProviderRequest":{ - "type":"structure", - "required":["ConferenceProviderArn"], - "members":{ - "ConferenceProviderArn":{ - "shape":"Arn", - "documentation":"

The ARN of the conference provider.

" - } - } - }, - "DeleteConferenceProviderResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteContactRequest":{ - "type":"structure", - "required":["ContactArn"], - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact to delete.

" - } - } - }, - "DeleteContactResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteDeviceRequest":{ - "type":"structure", - "required":["DeviceArn"], - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device for which to request details.

" - } - } - }, - "DeleteDeviceResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteDeviceUsageDataRequest":{ - "type":"structure", - "required":[ - "DeviceArn", - "DeviceUsageType" - ], - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device.

" - }, - "DeviceUsageType":{ - "shape":"DeviceUsageType", - "documentation":"

The type of usage data to delete.

" - } - } - }, - "DeleteDeviceUsageDataResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteGatewayGroupRequest":{ - "type":"structure", - "required":["GatewayGroupArn"], - "members":{ - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group to delete.

" - } - } - }, - "DeleteGatewayGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteNetworkProfileRequest":{ - "type":"structure", - "required":["NetworkProfileArn"], - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - } - } - }, - "DeleteNetworkProfileResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteProfileRequest":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room profile to delete. Required.

" - } - } - }, - "DeleteProfileResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteRoomRequest":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room to delete. Required.

" - } - } - }, - "DeleteRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteRoomSkillParameterRequest":{ - "type":"structure", - "required":[ - "SkillId", - "ParameterKey" - ], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room from which to remove the room skill parameter details.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ID of the skill from which to remove the room skill parameter details.

" - }, - "ParameterKey":{ - "shape":"RoomSkillParameterKey", - "documentation":"

The room skill parameter key for which to remove details.

" - } - } - }, - "DeleteRoomSkillParameterResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteSkillAuthorizationRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The unique identifier of a skill.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room that the skill is authorized for.

" - } - } - }, - "DeleteSkillAuthorizationResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteSkillGroupRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group to delete. Required.

" - } - } - }, - "DeleteSkillGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteUserRequest":{ - "type":"structure", - "required":["EnrollmentId"], - "members":{ - "UserArn":{ - "shape":"Arn", - "documentation":"

The ARN of the user to delete in the organization. Required.

" - }, - "EnrollmentId":{ - "shape":"EnrollmentId", - "documentation":"

The ARN of the user's enrollment in the organization. Required.

" - } - } - }, - "DeleteUserResponse":{ - "type":"structure", - "members":{ - } - }, - "DeveloperInfo":{ - "type":"structure", - "members":{ - "DeveloperName":{ - "shape":"DeveloperName", - "documentation":"

The name of the developer.

" - }, - "PrivacyPolicy":{ - "shape":"PrivacyPolicy", - "documentation":"

The URL of the privacy policy.

" - }, - "Email":{ - "shape":"Email", - "documentation":"

The email of the developer.

" - }, - "Url":{ - "shape":"Url", - "documentation":"

The website of the developer.

" - } - }, - "documentation":"

The details about the developer that published the skill.

" - }, - "DeveloperName":{"type":"string"}, - "Device":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of a device.

" - }, - "DeviceSerialNumber":{ - "shape":"DeviceSerialNumber", - "documentation":"

The serial number of a device.

" - }, - "DeviceType":{ - "shape":"DeviceType", - "documentation":"

The type of a device.

" - }, - "DeviceName":{ - "shape":"DeviceName", - "documentation":"

The name of a device.

" - }, - "SoftwareVersion":{ - "shape":"SoftwareVersion", - "documentation":"

The software version of a device.

" - }, - "MacAddress":{ - "shape":"MacAddress", - "documentation":"

The MAC address of a device.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room ARN of a device.

" - }, - "DeviceStatus":{ - "shape":"DeviceStatus", - "documentation":"

The status of a device. If the status is not READY, check the DeviceStatusInfo value for details.

" - }, - "DeviceStatusInfo":{ - "shape":"DeviceStatusInfo", - "documentation":"

Detailed information about a device's status.

" - }, - "NetworkProfileInfo":{ - "shape":"DeviceNetworkProfileInfo", - "documentation":"

Detailed information about a device's network profile.

" - } - }, - "documentation":"

A device with attributes.

" - }, - "DeviceData":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of a device.

" - }, - "DeviceSerialNumber":{ - "shape":"DeviceSerialNumber", - "documentation":"

The serial number of a device.

" - }, - "DeviceType":{ - "shape":"DeviceType", - "documentation":"

The type of a device.

" - }, - "DeviceName":{ - "shape":"DeviceName", - "documentation":"

The name of a device.

" - }, - "SoftwareVersion":{ - "shape":"SoftwareVersion", - "documentation":"

The software version of a device.

" - }, - "MacAddress":{ - "shape":"MacAddress", - "documentation":"

The MAC address of a device.

" - }, - "DeviceStatus":{ - "shape":"DeviceStatus", - "documentation":"

The status of a device.

" - }, - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - }, - "NetworkProfileName":{ - "shape":"NetworkProfileName", - "documentation":"

The name of the network profile associated with a device.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room ARN associated with a device.

" - }, - "RoomName":{ - "shape":"DeviceRoomName", - "documentation":"

The name of the room associated with a device.

" - }, - "DeviceStatusInfo":{ - "shape":"DeviceStatusInfo", - "documentation":"

Detailed information about a device's status.

" - }, - "CreatedTime":{ - "shape":"DeviceDataCreatedTime", - "documentation":"

The time (in epoch) when the device data was created.

" - } - }, - "documentation":"

Device attributes.

" - }, - "DeviceDataCreatedTime":{"type":"timestamp"}, - "DeviceDataList":{ - "type":"list", - "member":{"shape":"DeviceData"} - }, - "DeviceEvent":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"DeviceEventType", - "documentation":"

The type of device event.

" - }, - "Value":{ - "shape":"DeviceEventValue", - "documentation":"

The value of the event.

" - }, - "Timestamp":{ - "shape":"DeviceEventTime", - "documentation":"

The time (in epoch) when the event occurred.

" - } - }, - "documentation":"

The list of device events.

" - }, - "DeviceEventList":{ - "type":"list", - "member":{"shape":"DeviceEvent"} - }, - "DeviceEventTime":{"type":"timestamp"}, - "DeviceEventType":{ - "type":"string", - "enum":[ - "CONNECTION_STATUS", - "DEVICE_STATUS" - ] - }, - "DeviceEventValue":{"type":"string"}, - "DeviceLocale":{ - "type":"string", - "max":256, - "min":1 - }, - "DeviceName":{ - "type":"string", - "max":100, - "min":2, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "DeviceNetworkProfileInfo":{ - "type":"structure", - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - }, - "CertificateArn":{ - "shape":"Arn", - "documentation":"

The ARN of the certificate associated with a device.

" - }, - "CertificateExpirationTime":{ - "shape":"CertificateTime", - "documentation":"

The time (in epoch) when the certificate expires.

" - } - }, - "documentation":"

Detailed information about a device's network profile.

" - }, - "DeviceNotRegisteredException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The request failed because this device is no longer registered and therefore no longer managed by this account.

", - "exception":true - }, - "DeviceRoomName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "DeviceSerialNumber":{ - "type":"string", - "pattern":"[a-zA-Z0-9]{1,200}" - }, - "DeviceSerialNumberForAVS":{ - "type":"string", - "pattern":"^[a-zA-Z0-9]{1,50}$" - }, - "DeviceStatus":{ - "type":"string", - "enum":[ - "READY", - "PENDING", - "WAS_OFFLINE", - "DEREGISTERED", - "FAILED" - ] - }, - "DeviceStatusDetail":{ - "type":"structure", - "members":{ - "Feature":{ - "shape":"Feature", - "documentation":"

The list of available features on the device.

" - }, - "Code":{ - "shape":"DeviceStatusDetailCode", - "documentation":"

The device status detail code.

" - } - }, - "documentation":"

Details of a device’s status.

" - }, - "DeviceStatusDetailCode":{ - "type":"string", - "enum":[ - "DEVICE_SOFTWARE_UPDATE_NEEDED", - "DEVICE_WAS_OFFLINE", - "CREDENTIALS_ACCESS_FAILURE", - "TLS_VERSION_MISMATCH", - "ASSOCIATION_REJECTION", - "AUTHENTICATION_FAILURE", - "DHCP_FAILURE", - "INTERNET_UNAVAILABLE", - "DNS_FAILURE", - "UNKNOWN_FAILURE", - "CERTIFICATE_ISSUING_LIMIT_EXCEEDED", - "INVALID_CERTIFICATE_AUTHORITY", - "NETWORK_PROFILE_NOT_FOUND", - "INVALID_PASSWORD_STATE", - "PASSWORD_NOT_FOUND", - "PASSWORD_MANAGER_ACCESS_DENIED", - "CERTIFICATE_AUTHORITY_ACCESS_DENIED" - ] - }, - "DeviceStatusDetails":{ - "type":"list", - "member":{"shape":"DeviceStatusDetail"} - }, - "DeviceStatusInfo":{ - "type":"structure", - "members":{ - "DeviceStatusDetails":{ - "shape":"DeviceStatusDetails", - "documentation":"

One or more device status detail descriptions.

" - }, - "ConnectionStatus":{ - "shape":"ConnectionStatus", - "documentation":"

The latest available information about the connection status of a device.

" - }, - "ConnectionStatusUpdatedTime":{ - "shape":"ConnectionStatusUpdatedTime", - "documentation":"

The time (in epoch) when the device connection status changed.

" - } - }, - "documentation":"

Detailed information about a device's status.

" - }, - "DeviceType":{ - "type":"string", - "pattern":"[a-zA-Z0-9]{1,200}" - }, - "DeviceUsageType":{ - "type":"string", - "enum":["VOICE"] - }, - "DisassociateContactFromAddressBookRequest":{ - "type":"structure", - "required":[ - "ContactArn", - "AddressBookArn" - ], - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact to disassociate from an address book.

" - }, - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address from which to disassociate the contact.

" - } - } - }, - "DisassociateContactFromAddressBookResponse":{ - "type":"structure", - "members":{ - } - }, - "DisassociateDeviceFromRoomRequest":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device to disassociate from a room. Required.

" - } - } - }, - "DisassociateDeviceFromRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "DisassociateSkillFromSkillGroupRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The unique identifier of a skill. Required.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of a skill group to associate to a skill.

" - } - } - }, - "DisassociateSkillFromSkillGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "DisassociateSkillFromUsersRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The private skill ID you want to make unavailable for enrolled users.

" - } - } - }, - "DisassociateSkillFromUsersResponse":{ - "type":"structure", - "members":{ - } - }, - "DisassociateSkillGroupFromRoomRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group to disassociate from a room. Required.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room from which the skill group is to be disassociated. Required.

" - } - } - }, - "DisassociateSkillGroupFromRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "DistanceUnit":{ - "type":"string", - "enum":[ - "METRIC", - "IMPERIAL" - ] - }, - "Email":{ - "type":"string", - "max":128, - "min":1, - "pattern":"\\w[+-.\\w]*@\\w[\\w\\.\\-]+\\.[0-9a-zA-Z]{2,24}" - }, - "EnablementType":{ - "type":"string", - "enum":[ - "ENABLED", - "PENDING" - ] - }, - "EnablementTypeFilter":{ - "type":"string", - "enum":[ - "ENABLED", - "PENDING" - ] - }, - "EndOfMeetingReminder":{ - "type":"structure", - "members":{ - "ReminderAtMinutes":{ - "shape":"EndOfMeetingReminderMinutesList", - "documentation":"

A range of 3 to 15 minutes that determines when the reminder begins.

" - }, - "ReminderType":{ - "shape":"EndOfMeetingReminderType", - "documentation":"

The type of sound that users hear during the end of meeting reminder.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether an end of meeting reminder is enabled or not.

" - } - }, - "documentation":"

Settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "EndOfMeetingReminderMinutesList":{ - "type":"list", - "member":{"shape":"Minutes"}, - "max":1, - "min":1 - }, - "EndOfMeetingReminderType":{ - "type":"string", - "enum":[ - "ANNOUNCEMENT_TIME_CHECK", - "ANNOUNCEMENT_VARIABLE_TIME_LEFT", - "CHIME", - "KNOCK" - ] - }, - "EndUserLicenseAgreement":{"type":"string"}, - "Endpoint":{ - "type":"string", - "max":256, - "min":1 - }, - "EnrollmentId":{ - "type":"string", - "max":128, - "min":0 - }, - "EnrollmentStatus":{ - "type":"string", - "enum":[ - "INITIALIZED", - "PENDING", - "REGISTERED", - "DISASSOCIATING", - "DEREGISTERING" - ] - }, - "ErrorMessage":{"type":"string"}, - "Feature":{ - "type":"string", - "enum":[ - "BLUETOOTH", - "VOLUME", - "NOTIFICATIONS", - "LISTS", - "SKILLS", - "NETWORK_PROFILE", - "SETTINGS", - "ALL" - ] - }, - "Features":{ - "type":"list", - "member":{"shape":"Feature"} - }, - "Filter":{ - "type":"structure", - "required":[ - "Key", - "Values" - ], - "members":{ - "Key":{ - "shape":"FilterKey", - "documentation":"

The key of a filter.

" - }, - "Values":{ - "shape":"FilterValueList", - "documentation":"

The values of a filter.

" - } - }, - "documentation":"

A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria.

" - }, - "FilterKey":{ - "type":"string", - "max":500, - "min":1 - }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"}, - "max":25 - }, - "FilterValue":{ - "type":"string", - "max":500, - "min":1 - }, - "FilterValueList":{ - "type":"list", - "member":{"shape":"FilterValue"}, - "max":50 - }, - "ForgetSmartHomeAppliancesRequest":{ - "type":"structure", - "required":["RoomArn"], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room that the appliances are associated with.

" - } - } - }, - "ForgetSmartHomeAppliancesResponse":{ - "type":"structure", - "members":{ - } - }, - "Gateway":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway.

" - }, - "Name":{ - "shape":"GatewayName", - "documentation":"

The name of the gateway.

" - }, - "Description":{ - "shape":"GatewayDescription", - "documentation":"

The description of the gateway.

" - }, - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group that the gateway is associated to.

" - }, - "SoftwareVersion":{ - "shape":"GatewayVersion", - "documentation":"

The software version of the gateway. The gateway automatically updates its software version during normal operation.

" - } - }, - "documentation":"

The details of the gateway.

" - }, - "GatewayDescription":{ - "type":"string", - "max":200, - "min":0, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "GatewayGroup":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group.

" - }, - "Name":{ - "shape":"GatewayGroupName", - "documentation":"

The name of the gateway group.

" - }, - "Description":{ - "shape":"GatewayGroupDescription", - "documentation":"

The description of the gateway group.

" - } - }, - "documentation":"

The details of the gateway group.

" - }, - "GatewayGroupDescription":{ - "type":"string", - "max":200, - "min":0 - }, - "GatewayGroupName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "GatewayGroupSummaries":{ - "type":"list", - "member":{"shape":"GatewayGroupSummary"} - }, - "GatewayGroupSummary":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group.

" - }, - "Name":{ - "shape":"GatewayGroupName", - "documentation":"

The name of the gateway group.

" - }, - "Description":{ - "shape":"GatewayGroupDescription", - "documentation":"

The description of the gateway group.

" - } - }, - "documentation":"

The summary of a gateway group.

" - }, - "GatewayName":{ - "type":"string", - "max":253, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "GatewaySummaries":{ - "type":"list", - "member":{"shape":"GatewaySummary"} - }, - "GatewaySummary":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway.

" - }, - "Name":{ - "shape":"GatewayName", - "documentation":"

The name of the gateway.

" - }, - "Description":{ - "shape":"GatewayDescription", - "documentation":"

The description of the gateway.

" - }, - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group that the gateway is associated to.

" - }, - "SoftwareVersion":{ - "shape":"GatewayVersion", - "documentation":"

The software version of the gateway. The gateway automatically updates its software version during normal operation.

" - } - }, - "documentation":"

The summary of a gateway.

" - }, - "GatewayVersion":{ - "type":"string", - "max":50, - "min":1 - }, - "GenericKeyword":{"type":"string"}, - "GenericKeywords":{ - "type":"list", - "member":{"shape":"GenericKeyword"} - }, - "GetAddressBookRequest":{ - "type":"structure", - "required":["AddressBookArn"], - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book for which to request details.

" - } - } - }, - "GetAddressBookResponse":{ - "type":"structure", - "members":{ - "AddressBook":{ - "shape":"AddressBook", - "documentation":"

The details of the requested address book.

" - } - } - }, - "GetConferencePreferenceRequest":{ - "type":"structure", - "members":{ - } - }, - "GetConferencePreferenceResponse":{ - "type":"structure", - "members":{ - "Preference":{ - "shape":"ConferencePreference", - "documentation":"

The conference preference.

" - } - } - }, - "GetConferenceProviderRequest":{ - "type":"structure", - "required":["ConferenceProviderArn"], - "members":{ - "ConferenceProviderArn":{ - "shape":"Arn", - "documentation":"

The ARN of the newly created conference provider.

" - } - } - }, - "GetConferenceProviderResponse":{ - "type":"structure", - "members":{ - "ConferenceProvider":{ - "shape":"ConferenceProvider", - "documentation":"

The conference provider.

" - } - } - }, - "GetContactRequest":{ - "type":"structure", - "required":["ContactArn"], - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact for which to request details.

" - } - } - }, - "GetContactResponse":{ - "type":"structure", - "members":{ - "Contact":{ - "shape":"Contact", - "documentation":"

The details of the requested contact.

" - } - } - }, - "GetDeviceRequest":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device for which to request details. Required.

" - } - } - }, - "GetDeviceResponse":{ - "type":"structure", - "members":{ - "Device":{ - "shape":"Device", - "documentation":"

The details of the device requested. Required.

" - } - } - }, - "GetGatewayGroupRequest":{ - "type":"structure", - "required":["GatewayGroupArn"], - "members":{ - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group to get.

" - } - } - }, - "GetGatewayGroupResponse":{ - "type":"structure", - "members":{ - "GatewayGroup":{"shape":"GatewayGroup"} - } - }, - "GetGatewayRequest":{ - "type":"structure", - "required":["GatewayArn"], - "members":{ - "GatewayArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway to get.

" - } - } - }, - "GetGatewayResponse":{ - "type":"structure", - "members":{ - "Gateway":{ - "shape":"Gateway", - "documentation":"

The details of the gateway.

" - } - } - }, - "GetInvitationConfigurationRequest":{ - "type":"structure", - "members":{ - } - }, - "GetInvitationConfigurationResponse":{ - "type":"structure", - "members":{ - "OrganizationName":{ - "shape":"OrganizationName", - "documentation":"

The name of the organization sending the enrollment invite to a user.

" - }, - "ContactEmail":{ - "shape":"Email", - "documentation":"

The email ID of the organization or individual contact that the enrolled user can use.

" - }, - "PrivateSkillIds":{ - "shape":"ShortSkillIdList", - "documentation":"

The list of private skill IDs that you want to recommend to the user to enable in the invitation.

" - } - } - }, - "GetNetworkProfileRequest":{ - "type":"structure", - "required":["NetworkProfileArn"], - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - } - } - }, - "GetNetworkProfileResponse":{ - "type":"structure", - "members":{ - "NetworkProfile":{ - "shape":"NetworkProfile", - "documentation":"

The network profile associated with a device.

" - } - } - }, - "GetProfileRequest":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room profile for which to request details. Required.

" - } - } - }, - "GetProfileResponse":{ - "type":"structure", - "members":{ - "Profile":{ - "shape":"Profile", - "documentation":"

The details of the room profile requested. Required.

" - } - } - }, - "GetRoomRequest":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room for which to request details. Required.

" - } - } - }, - "GetRoomResponse":{ - "type":"structure", - "members":{ - "Room":{ - "shape":"Room", - "documentation":"

The details of the room requested.

" - } - } - }, - "GetRoomSkillParameterRequest":{ - "type":"structure", - "required":[ - "SkillId", - "ParameterKey" - ], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room from which to get the room skill parameter details.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of the skill from which to get the room skill parameter details. Required.

" - }, - "ParameterKey":{ - "shape":"RoomSkillParameterKey", - "documentation":"

The room skill parameter key for which to get details. Required.

" - } - } - }, - "GetRoomSkillParameterResponse":{ - "type":"structure", - "members":{ - "RoomSkillParameter":{ - "shape":"RoomSkillParameter", - "documentation":"

The details of the room skill parameter requested. Required.

" - } - } - }, - "GetSkillGroupRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group for which to get details. Required.

" - } - } - }, - "GetSkillGroupResponse":{ - "type":"structure", - "members":{ - "SkillGroup":{ - "shape":"SkillGroup", - "documentation":"

The details of the skill group requested. Required.

" - } - } - }, - "IPDialIn":{ - "type":"structure", - "required":[ - "Endpoint", - "CommsProtocol" - ], - "members":{ - "Endpoint":{ - "shape":"Endpoint", - "documentation":"

The IP address.

" - }, - "CommsProtocol":{ - "shape":"CommsProtocol", - "documentation":"

The protocol, including SIP, SIPS, and H323.

" - } - }, - "documentation":"

The IP endpoint and protocol for calling.

" - }, - "IconUrl":{"type":"string"}, - "InstantBooking":{ - "type":"structure", - "members":{ - "DurationInMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 15 and 240 minutes at increments of 15 that determines how long to book an available room when a meeting is started with Alexa.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether instant booking is enabled or not.

" - } - }, - "documentation":"

Settings for the instant booking feature that are applied to a room profile. When users start their meeting with Alexa, Alexa automatically books the room for the configured duration if the room is available.

" - }, - "InvalidCertificateAuthorityException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The Certificate Authority can't issue or revoke a certificate.

", - "exception":true - }, - "InvalidDeviceException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The device is in an invalid state.

", - "exception":true - }, - "InvalidSecretsManagerResourceException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

A password in SecretsManager is in an invalid state.

", - "exception":true - }, - "InvalidServiceLinkedRoleStateException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The service linked role is locked for deletion.

", - "exception":true - }, - "InvalidUserStatusException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The attempt to update a user is invalid due to the user's current status.

", - "exception":true - }, - "InvocationPhrase":{"type":"string"}, - "Key":{ - "type":"string", - "min":1 - }, - "LimitExceededException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

You are performing an action that would put you beyond your account's limits.

", - "exception":true - }, - "ListBusinessReportSchedulesRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to list the remaining schedules from the previous API call.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of schedules listed in the call.

" - } - } - }, - "ListBusinessReportSchedulesResponse":{ - "type":"structure", - "members":{ - "BusinessReportSchedules":{ - "shape":"BusinessReportScheduleList", - "documentation":"

The schedule of the reports.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to list the remaining schedules from the previous API call.

" - } - } - }, - "ListConferenceProvidersRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of conference providers to be returned, per paginated calls.

" - } - } - }, - "ListConferenceProvidersResponse":{ - "type":"structure", - "members":{ - "ConferenceProviders":{ - "shape":"ConferenceProvidersList", - "documentation":"

The conference providers.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - } - } - }, - "ListDeviceEventsRequest":{ - "type":"structure", - "required":["DeviceArn"], - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of a device.

" - }, - "EventType":{ - "shape":"DeviceEventType", - "documentation":"

The event type to filter device events. If EventType isn't specified, this returns a list of all device events in reverse chronological order. If EventType is specified, this returns a list of device events for that EventType in reverse chronological order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults. When the end of results is reached, the response has a value of null.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. The default value is 50. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - } - } - }, - "ListDeviceEventsResponse":{ - "type":"structure", - "members":{ - "DeviceEvents":{ - "shape":"DeviceEventList", - "documentation":"

The device events requested for the device ARN.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - } - } - }, - "ListGatewayGroupsRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to paginate though multiple pages of gateway group summaries.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of gateway group summaries to return. The default is 50.

" - } - } - }, - "ListGatewayGroupsResponse":{ - "type":"structure", - "members":{ - "GatewayGroups":{ - "shape":"GatewayGroupSummaries", - "documentation":"

The gateway groups in the list.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to paginate though multiple pages of gateway group summaries.

" - } - } - }, - "ListGatewaysRequest":{ - "type":"structure", - "members":{ - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The gateway group ARN for which to list gateways.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to paginate though multiple pages of gateway summaries.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of gateway summaries to return. The default is 50.

" - } - } - }, - "ListGatewaysResponse":{ - "type":"structure", - "members":{ - "Gateways":{ - "shape":"GatewaySummaries", - "documentation":"

The gateways in the list.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token used to paginate though multiple pages of gateway summaries.

" - } - } - }, - "ListSkillsRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group for which to list enabled skills.

" - }, - "EnablementType":{ - "shape":"EnablementTypeFilter", - "documentation":"

Whether the skill is enabled under the user's account.

" - }, - "SkillType":{ - "shape":"SkillTypeFilter", - "documentation":"

Whether the skill is publicly available or is a private skill.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"SkillListMaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - } - } - }, - "ListSkillsResponse":{ - "type":"structure", - "members":{ - "SkillSummaries":{ - "shape":"SkillSummaryList", - "documentation":"

The list of enabled skills requested. Required.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - } - } - }, - "ListSkillsStoreCategoriesRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of categories returned, per paginated calls.

" - } - } - }, - "ListSkillsStoreCategoriesResponse":{ - "type":"structure", - "members":{ - "CategoryList":{ - "shape":"CategoryList", - "documentation":"

The list of categories.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - } - } - }, - "ListSkillsStoreSkillsByCategoryRequest":{ - "type":"structure", - "required":["CategoryId"], - "members":{ - "CategoryId":{ - "shape":"CategoryId", - "documentation":"

The category ID for which the skills are being retrieved from the skill store.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - }, - "MaxResults":{ - "shape":"SkillListMaxResults", - "documentation":"

The maximum number of skills returned per paginated calls.

" - } - } - }, - "ListSkillsStoreSkillsByCategoryResponse":{ - "type":"structure", - "members":{ - "SkillsStoreSkills":{ - "shape":"SkillsStoreSkillList", - "documentation":"

The skill store skills.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - } - } - }, - "ListSmartHomeAppliancesRequest":{ - "type":"structure", - "required":["RoomArn"], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room that the appliances are associated with.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of appliances to be returned, per paginated calls.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - } - } - }, - "ListSmartHomeAppliancesResponse":{ - "type":"structure", - "members":{ - "SmartHomeAppliances":{ - "shape":"SmartHomeApplianceList", - "documentation":"

The smart home appliances.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The tokens used for pagination.

" - } - } - }, - "ListTagsRequest":{ - "type":"structure", - "required":["Arn"], - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the specified resource for which to list tags.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - } - } - }, - "ListTagsResponse":{ - "type":"structure", - "members":{ - "Tags":{ - "shape":"TagList", - "documentation":"

The tags requested for the specified resource.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - } - } - }, - "Locale":{ - "type":"string", - "enum":["en-US"] - }, - "MacAddress":{"type":"string"}, - "MaxResults":{ - "type":"integer", - "max":50, - "min":1 - }, - "MaxVolumeLimit":{"type":"integer"}, - "MeetingRoomConfiguration":{ - "type":"structure", - "members":{ - "RoomUtilizationMetricsEnabled":{ - "shape":"Boolean", - "documentation":"

Whether room utilization metrics are enabled or not.

" - }, - "EndOfMeetingReminder":{ - "shape":"EndOfMeetingReminder", - "documentation":"

Settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "InstantBooking":{ - "shape":"InstantBooking", - "documentation":"

Settings to automatically book the room if available for a configured duration when joining a meeting with Alexa.

" - }, - "RequireCheckIn":{ - "shape":"RequireCheckIn", - "documentation":"

Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into. This makes the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

" - }, - "ProactiveJoin":{"shape":"ProactiveJoin"} - }, - "documentation":"

Meeting room settings of a room profile.

" - }, - "MeetingSetting":{ - "type":"structure", - "required":["RequirePin"], - "members":{ - "RequirePin":{ - "shape":"RequirePin", - "documentation":"

The values that indicate whether the pin is always required.

" - } - }, - "documentation":"

The values that indicate whether a pin is always required (YES), never required (NO), or OPTIONAL.

  • If YES, Alexa will always ask for a meeting pin.

  • If NO, Alexa will never ask for a meeting pin.

  • If OPTIONAL, Alexa will ask if you have a meeting pin and if the customer responds with yes, it will ask for the meeting pin.

" - }, - "Minutes":{"type":"integer"}, - "NameInUseException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The name sent in the request is already in use.

", - "exception":true - }, - "NetworkEapMethod":{ - "type":"string", - "enum":["EAP_TLS"] - }, - "NetworkProfile":{ - "type":"structure", - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - }, - "NetworkProfileName":{ - "shape":"NetworkProfileName", - "documentation":"

The name of the network profile associated with a device.

" - }, - "Description":{ - "shape":"NetworkProfileDescription", - "documentation":"

Detailed information about a device's network profile.

" - }, - "Ssid":{ - "shape":"NetworkSsid", - "documentation":"

The SSID of the Wi-Fi network.

" - }, - "SecurityType":{ - "shape":"NetworkSecurityType", - "documentation":"

The security type of the Wi-Fi network. This can be WPA2_ENTERPRISE, WPA2_PSK, WPA_PSK, WEP, or OPEN.

" - }, - "EapMethod":{ - "shape":"NetworkEapMethod", - "documentation":"

The authentication standard that is used in the EAP framework. Currently, EAP_TLS is supported.

" - }, - "CurrentPassword":{ - "shape":"CurrentWiFiPassword", - "documentation":"

The current password of the Wi-Fi network.

" - }, - "NextPassword":{ - "shape":"NextWiFiPassword", - "documentation":"

The next, or subsequent, password of the Wi-Fi network. This password is asynchronously transmitted to the device and is used when the password of the network changes to NextPassword.

" - }, - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

" - }, - "TrustAnchors":{ - "shape":"TrustAnchorList", - "documentation":"

The root certificates of your authentication server, which is installed on your devices and used to trust your authentication server during EAP negotiation.

" - } - }, - "documentation":"

The network profile associated with a device.

" - }, - "NetworkProfileData":{ - "type":"structure", - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - }, - "NetworkProfileName":{ - "shape":"NetworkProfileName", - "documentation":"

The name of the network profile associated with a device.

" - }, - "Description":{ - "shape":"NetworkProfileDescription", - "documentation":"

Detailed information about a device's network profile.

" - }, - "Ssid":{ - "shape":"NetworkSsid", - "documentation":"

The SSID of the Wi-Fi network.

" - }, - "SecurityType":{ - "shape":"NetworkSecurityType", - "documentation":"

The security type of the Wi-Fi network. This can be WPA2_ENTERPRISE, WPA2_PSK, WPA_PSK, WEP, or OPEN.

" - }, - "EapMethod":{ - "shape":"NetworkEapMethod", - "documentation":"

The authentication standard that is used in the EAP framework. Currently, EAP_TLS is supported.

" - }, - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

" - } - }, - "documentation":"

The data associated with a network profile.

" - }, - "NetworkProfileDataList":{ - "type":"list", - "member":{"shape":"NetworkProfileData"} - }, - "NetworkProfileDescription":{ - "type":"string", - "max":200, - "min":0, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "NetworkProfileName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "NetworkSecurityType":{ - "type":"string", - "enum":[ - "OPEN", - "WEP", - "WPA_PSK", - "WPA2_PSK", - "WPA2_ENTERPRISE" - ] - }, - "NetworkSsid":{ - "type":"string", - "max":32, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "NewInThisVersionBulletPoints":{ - "type":"list", - "member":{"shape":"BulletPoint"} - }, - "NextToken":{ - "type":"string", - "max":1100, - "min":1 - }, - "NextWiFiPassword":{ - "type":"string", - "max":128, - "min":0, - "pattern":"(^$)|([\\x00-\\x7F]{5,})", - "sensitive":true - }, - "NotFoundException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The resource is not found.

", - "exception":true - }, - "OneClickIdDelay":{ - "type":"string", - "max":2, - "min":1 - }, - "OneClickPinDelay":{ - "type":"string", - "max":2, - "min":1 - }, - "OrganizationName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "OutboundPhoneNumber":{ - "type":"string", - "pattern":"\\d{10}" - }, - "PSTNDialIn":{ - "type":"structure", - "required":[ - "CountryCode", - "PhoneNumber", - "OneClickIdDelay", - "OneClickPinDelay" - ], - "members":{ - "CountryCode":{ - "shape":"CountryCode", - "documentation":"

The zip code.

" - }, - "PhoneNumber":{ - "shape":"OutboundPhoneNumber", - "documentation":"

The phone number to call to join the conference.

" - }, - "OneClickIdDelay":{ - "shape":"OneClickIdDelay", - "documentation":"

The delay duration before Alexa enters the conference ID with dual-tone multi-frequency (DTMF). Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.

" - }, - "OneClickPinDelay":{ - "shape":"OneClickPinDelay", - "documentation":"

The delay duration before Alexa enters the conference pin with dual-tone multi-frequency (DTMF). Each number on the dial pad corresponds to a DTMF tone, which is how we send data over the telephone network.

" - } - }, - "documentation":"

The information for public switched telephone network (PSTN) conferencing.

" - }, - "PhoneNumber":{ - "type":"structure", - "required":[ - "Number", - "Type" - ], - "members":{ - "Number":{ - "shape":"RawPhoneNumber", - "documentation":"

The raw value of the phone number.

" - }, - "Type":{ - "shape":"PhoneNumberType", - "documentation":"

The type of the phone number.

" - } - }, - "documentation":"

The phone number for the contact containing the raw number and phone number type.

" - }, - "PhoneNumberList":{ - "type":"list", - "member":{"shape":"PhoneNumber"}, - "max":3, - "min":0 - }, - "PhoneNumberType":{ - "type":"string", - "enum":[ - "MOBILE", - "WORK", - "HOME" - ], - "sensitive":true - }, - "PrivacyPolicy":{"type":"string"}, - "ProactiveJoin":{ - "type":"structure", - "members":{ - "EnabledByMotion":{"shape":"Boolean"} - } - }, - "ProductDescription":{"type":"string"}, - "ProductId":{ - "type":"string", - "pattern":"^[a-zA-Z0-9_]{1,256}$" - }, - "Profile":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of a room profile.

" - }, - "ProfileName":{ - "shape":"ProfileName", - "documentation":"

The name of a room profile.

" - }, - "IsDefault":{ - "shape":"Boolean", - "documentation":"

Retrieves if the profile is default or not.

" - }, - "Address":{ - "shape":"Address", - "documentation":"

The address of a room profile.

" - }, - "Timezone":{ - "shape":"Timezone", - "documentation":"

The time zone of a room profile.

" - }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The distance unit of a room profile.

" - }, - "TemperatureUnit":{ - "shape":"TemperatureUnit", - "documentation":"

The temperature unit of a room profile.

" - }, - "WakeWord":{ - "shape":"WakeWord", - "documentation":"

The wake word of a room profile.

" - }, - "Locale":{ - "shape":"DeviceLocale", - "documentation":"

The locale of a room profile. (This is currently available only to a limited preview audience.)

" - }, - "SetupModeDisabled":{ - "shape":"Boolean", - "documentation":"

The setup mode of a room profile.

" - }, - "MaxVolumeLimit":{ - "shape":"MaxVolumeLimit", - "documentation":"

The max volume limit of a room profile.

" - }, - "PSTNEnabled":{ - "shape":"Boolean", - "documentation":"

The PSTN setting of a room profile.

" - }, - "DataRetentionOptIn":{ - "shape":"Boolean", - "documentation":"

Whether data retention of the profile is enabled.

" - }, - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the address book.

" - }, - "MeetingRoomConfiguration":{ - "shape":"MeetingRoomConfiguration", - "documentation":"

Meeting room settings of a room profile.

" - } - }, - "documentation":"

A room profile with attributes.

" - }, - "ProfileData":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of a room profile.

" - }, - "ProfileName":{ - "shape":"ProfileName", - "documentation":"

The name of a room profile.

" - }, - "IsDefault":{ - "shape":"Boolean", - "documentation":"

Retrieves if the profile data is default or not.

" - }, - "Address":{ - "shape":"Address", - "documentation":"

The address of a room profile.

" - }, - "Timezone":{ - "shape":"Timezone", - "documentation":"

The time zone of a room profile.

" - }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The distance unit of a room profile.

" - }, - "TemperatureUnit":{ - "shape":"TemperatureUnit", - "documentation":"

The temperature unit of a room profile.

" - }, - "WakeWord":{ - "shape":"WakeWord", - "documentation":"

The wake word of a room profile.

" - }, - "Locale":{ - "shape":"DeviceLocale", - "documentation":"

The locale of a room profile. (This is currently available only to a limited preview audience.)

" - } - }, - "documentation":"

The data of a room profile.

" - }, - "ProfileDataList":{ - "type":"list", - "member":{"shape":"ProfileData"} - }, - "ProfileName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "ProviderCalendarId":{ - "type":"string", - "max":100, - "min":0 - }, - "PutConferencePreferenceRequest":{ - "type":"structure", - "required":["ConferencePreference"], - "members":{ - "ConferencePreference":{ - "shape":"ConferencePreference", - "documentation":"

The conference preference of a specific conference provider.

" - } - } - }, - "PutConferencePreferenceResponse":{ - "type":"structure", - "members":{ - } - }, - "PutInvitationConfigurationRequest":{ - "type":"structure", - "required":["OrganizationName"], - "members":{ - "OrganizationName":{ - "shape":"OrganizationName", - "documentation":"

The name of the organization sending the enrollment invite to a user.

" - }, - "ContactEmail":{ - "shape":"Email", - "documentation":"

The email ID of the organization or individual contact that the enrolled user can use.

" - }, - "PrivateSkillIds":{ - "shape":"ShortSkillIdList", - "documentation":"

The list of private skill IDs that you want to recommend to the user to enable in the invitation.

" - } - } - }, - "PutInvitationConfigurationResponse":{ - "type":"structure", - "members":{ - } - }, - "PutRoomSkillParameterRequest":{ - "type":"structure", - "required":[ - "SkillId", - "RoomSkillParameter" - ], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room associated with the room skill parameter. Required.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of the skill associated with the room skill parameter. Required.

" - }, - "RoomSkillParameter":{ - "shape":"RoomSkillParameter", - "documentation":"

The updated room skill parameter. Required.

" - } - } - }, - "PutRoomSkillParameterResponse":{ - "type":"structure", - "members":{ - } - }, - "PutSkillAuthorizationRequest":{ - "type":"structure", - "required":[ - "AuthorizationResult", - "SkillId" - ], - "members":{ - "AuthorizationResult":{ - "shape":"AuthorizationResult", - "documentation":"

The authorization result specific to OAUTH code grant output. \"Code” must be populated in the AuthorizationResult map to establish the authorization.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The unique identifier of a skill.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room that the skill is authorized for.

" - } - } - }, - "PutSkillAuthorizationResponse":{ - "type":"structure", - "members":{ - } - }, - "RawPhoneNumber":{ - "type":"string", - "max":50, - "min":0, - "pattern":"^[\\+0-9\\#\\,\\(][\\+0-9\\-\\.\\/\\(\\)\\,\\#\\s]+$", - "sensitive":true - }, - "RegisterAVSDeviceRequest":{ - "type":"structure", - "required":[ - "ClientId", - "UserCode", - "ProductId", - "AmazonId" - ], - "members":{ - "ClientId":{ - "shape":"ClientId", - "documentation":"

The client ID of the OEM used for code-based linking authorization on an AVS device.

" - }, - "UserCode":{ - "shape":"UserCode", - "documentation":"

The code that is obtained after your AVS device has made a POST request to LWA as a part of the Device Authorization Request component of the OAuth code-based linking specification.

" - }, - "ProductId":{ - "shape":"ProductId", - "documentation":"

The product ID used to identify your AVS device during authorization.

" - }, - "DeviceSerialNumber":{ - "shape":"DeviceSerialNumberForAVS", - "documentation":"

The key generated by the OEM that uniquely identifies a specified instance of your AVS device.

" - }, - "AmazonId":{ - "shape":"AmazonId", - "documentation":"

The device type ID for your AVS device generated by Amazon when the OEM creates a new product on Amazon's Developer Console.

" - }, - "RoomArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the room with which to associate your AVS device.

" - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" - } - } - }, - "RegisterAVSDeviceResponse":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device.

" - } - } - }, - "RejectSkillRequest":{ - "type":"structure", - "required":["SkillId"], - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The unique identifier of the skill.

" - } - } - }, - "RejectSkillResponse":{ - "type":"structure", - "members":{ - } - }, - "ReleaseDate":{"type":"string"}, - "RequireCheckIn":{ - "type":"structure", - "members":{ - "ReleaseAfterMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 5 and 20 minutes to determine when to release the room if it's not checked into.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether require check in is enabled or not.

" - } - }, - "documentation":"

Settings for the require check in feature that are applied to a room profile. Require check in allows a meeting room’s Alexa or AVS device to prompt the user to check in; otherwise, the room will be released.

" - }, - "RequirePin":{ - "type":"string", - "enum":[ - "YES", - "NO", - "OPTIONAL" - ] - }, - "ResolveRoomRequest":{ - "type":"structure", - "required":[ - "UserId", - "SkillId" - ], - "members":{ - "UserId":{ - "shape":"UserId", - "documentation":"

The ARN of the user. Required.

" - }, - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of the skill that was requested. Required.

" - } - } - }, - "ResolveRoomResponse":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room from which the skill request was invoked.

" - }, - "RoomName":{ - "shape":"RoomName", - "documentation":"

The name of the room from which the skill request was invoked.

" - }, - "RoomSkillParameters":{ - "shape":"RoomSkillParameters", - "documentation":"

Response to get the room profile request. Required.

" - } - } - }, - "ResourceAssociatedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

Another resource is associated with the resource in the request.

", - "exception":true - }, - "ResourceInUseException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"}, - "ClientRequestToken":{"shape":"ClientRequestToken"} - }, - "documentation":"

The resource in the request is already in use.

", - "exception":true - }, - "ReviewKey":{"type":"string"}, - "ReviewValue":{"type":"string"}, - "Reviews":{ - "type":"map", - "key":{"shape":"ReviewKey"}, - "value":{"shape":"ReviewValue"} - }, - "RevokeInvitationRequest":{ - "type":"structure", - "members":{ - "UserArn":{ - "shape":"Arn", - "documentation":"

The ARN of the user for whom to revoke an enrollment invitation. Required.

" - }, - "EnrollmentId":{ - "shape":"EnrollmentId", - "documentation":"

The ARN of the enrollment invitation to revoke. Required.

" - } - } - }, - "RevokeInvitationResponse":{ - "type":"structure", - "members":{ - } - }, - "Room":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of a room.

" - }, - "RoomName":{ - "shape":"RoomName", - "documentation":"

The name of a room.

" - }, - "Description":{ - "shape":"RoomDescription", - "documentation":"

The description of a room.

" - }, - "ProviderCalendarId":{ - "shape":"ProviderCalendarId", - "documentation":"

The provider calendar ARN of a room.

" - }, - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The profile ARN of a room.

" - } - }, - "documentation":"

A room with attributes.

" - }, - "RoomData":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of a room.

" - }, - "RoomName":{ - "shape":"RoomName", - "documentation":"

The name of a room.

" - }, - "Description":{ - "shape":"RoomDescription", - "documentation":"

The description of a room.

" - }, - "ProviderCalendarId":{ - "shape":"ProviderCalendarId", - "documentation":"

The provider calendar ARN of a room.

" - }, - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The profile ARN of a room.

" - }, - "ProfileName":{ - "shape":"ProfileName", - "documentation":"

The profile name of a room.

" - } - }, - "documentation":"

The data of a room.

" - }, - "RoomDataList":{ - "type":"list", - "member":{"shape":"RoomData"} - }, - "RoomDescription":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "RoomName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "RoomSkillParameter":{ - "type":"structure", - "required":[ - "ParameterKey", - "ParameterValue" - ], - "members":{ - "ParameterKey":{ - "shape":"RoomSkillParameterKey", - "documentation":"

The parameter key of a room skill parameter. ParameterKey is an enumerated type that only takes “DEFAULT” or “SCOPE” as valid values.

" - }, - "ParameterValue":{ - "shape":"RoomSkillParameterValue", - "documentation":"

The parameter value of a room skill parameter.

" - } - }, - "documentation":"

A skill parameter associated with a room.

" - }, - "RoomSkillParameterKey":{ - "type":"string", - "max":256, - "min":1 - }, - "RoomSkillParameterValue":{ - "type":"string", - "max":512, - "min":1 - }, - "RoomSkillParameters":{ - "type":"list", - "member":{"shape":"RoomSkillParameter"} - }, - "S3KeyPrefix":{ - "type":"string", - "max":100, - "min":0, - "pattern":"[A-Za-z0-9!_\\-\\.\\*'()/]*" - }, - "SampleUtterances":{ - "type":"list", - "member":{"shape":"Utterance"} - }, - "SearchAddressBooksRequest":{ - "type":"structure", - "members":{ - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of address books. The supported filter key is AddressBookName.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of address books. The supported sort key is AddressBookName.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - } - } - }, - "SearchAddressBooksResponse":{ - "type":"structure", - "members":{ - "AddressBooks":{ - "shape":"AddressBookDataList", - "documentation":"

The address books that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of address books returned.

" - } - } - }, - "SearchContactsRequest":{ - "type":"structure", - "members":{ - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of address books. The supported filter keys are DisplayName, FirstName, LastName, and AddressBookArns.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of contacts. The supported sort keys are DisplayName, FirstName, and LastName.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - } - } - }, - "SearchContactsResponse":{ - "type":"structure", - "members":{ - "Contacts":{ - "shape":"ContactDataList", - "documentation":"

The contacts that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of contacts returned.

" - } - } - }, - "SearchDevicesRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of devices. Supported filter keys are DeviceName, DeviceStatus, DeviceStatusDetailCode, RoomName, DeviceType, DeviceSerialNumber, UnassociatedOnly, ConnectionStatus (ONLINE and OFFLINE), NetworkProfileName, NetworkProfileArn, Feature, and FailureCode.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of devices. Supported sort keys are DeviceName, DeviceStatus, RoomName, DeviceType, DeviceSerialNumber, ConnectionStatus, NetworkProfileName, NetworkProfileArn, Feature, and FailureCode.

" - } - } - }, - "SearchDevicesResponse":{ - "type":"structure", - "members":{ - "Devices":{ - "shape":"DeviceDataList", - "documentation":"

The devices that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of devices returned.

" - } - } - }, - "SearchNetworkProfilesRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of network profiles. Valid filters are NetworkProfileName, Ssid, and SecurityType.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use to list the specified set of network profiles. Valid sort criteria includes NetworkProfileName, Ssid, and SecurityType.

" - } - } - }, - "SearchNetworkProfilesResponse":{ - "type":"structure", - "members":{ - "NetworkProfiles":{ - "shape":"NetworkProfileDataList", - "documentation":"

The network profiles that meet the specified set of filter criteria, in sort order. It is a list of NetworkProfileData objects.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of network profiles returned.

" - } - } - }, - "SearchProfilesRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of room profiles. Supported filter keys are ProfileName and Address. Required.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of room profiles. Supported sort keys are ProfileName and Address.

" - } - } - }, - "SearchProfilesResponse":{ - "type":"structure", - "members":{ - "Profiles":{ - "shape":"ProfileDataList", - "documentation":"

The profiles that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of room profiles returned.

" - } - } - }, - "SearchRoomsRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of rooms. The supported filter keys are RoomName and ProfileName.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of rooms. The supported sort keys are RoomName and ProfileName.

" - } - } - }, - "SearchRoomsResponse":{ - "type":"structure", - "members":{ - "Rooms":{ - "shape":"RoomDataList", - "documentation":"

The rooms that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of rooms returned.

" - } - } - }, - "SearchSkillGroupsRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults. Required.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of skill groups. The supported filter key is SkillGroupName.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the specified set of skill groups. The supported sort key is SkillGroupName.

" - } - } - }, - "SearchSkillGroupsResponse":{ - "type":"structure", - "members":{ - "SkillGroups":{ - "shape":"SkillGroupDataList", - "documentation":"

The skill groups that meet the filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of skill groups returned.

" - } - } - }, - "SearchUsersRequest":{ - "type":"structure", - "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults. Required.

" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. Required.

" - }, - "Filters":{ - "shape":"FilterList", - "documentation":"

The filters to use for listing a specific set of users. Required. Supported filter keys are UserId, FirstName, LastName, Email, and EnrollmentStatus.

" - }, - "SortCriteria":{ - "shape":"SortList", - "documentation":"

The sort order to use in listing the filtered set of users. Required. Supported sort keys are UserId, FirstName, LastName, Email, and EnrollmentStatus.

" - } - } - }, - "SearchUsersResponse":{ - "type":"structure", - "members":{ - "Users":{ - "shape":"UserDataList", - "documentation":"

The users that meet the specified set of filter criteria, in sort order.

" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

The token returned to indicate that there is more data available.

" - }, - "TotalCount":{ - "shape":"TotalCount", - "documentation":"

The total number of users returned.

" - } - } - }, - "SendAnnouncementRequest":{ - "type":"structure", - "required":[ - "RoomFilters", - "Content", - "ClientRequestToken" - ], - "members":{ - "RoomFilters":{ - "shape":"FilterList", - "documentation":"

The filters to use to send an announcement to a specified list of rooms. The supported filter keys are RoomName, ProfileName, RoomArn, and ProfileArn. To send to all rooms, specify an empty RoomFilters list.

" - }, - "Content":{ - "shape":"Content", - "documentation":"

The announcement content. This can contain only one of the three possible announcement types (text, SSML or audio).

" - }, - "TimeToLiveInSeconds":{ - "shape":"TimeToLiveInSeconds", - "documentation":"

The time to live for an announcement. Default is 300. If delivery doesn't occur within this time, the announcement is not delivered.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The unique, user-specified identifier for the request that ensures idempotency.

", - "idempotencyToken":true - } - } - }, - "SendAnnouncementResponse":{ - "type":"structure", - "members":{ - "AnnouncementArn":{ - "shape":"Arn", - "documentation":"

The identifier of the announcement.

" - } - } - }, - "SendInvitationRequest":{ - "type":"structure", - "members":{ - "UserArn":{ - "shape":"Arn", - "documentation":"

The ARN of the user to whom to send an invitation. Required.

" - } - } - }, - "SendInvitationResponse":{ - "type":"structure", - "members":{ - } - }, - "ShortDescription":{"type":"string"}, - "ShortSkillIdList":{ - "type":"list", - "member":{"shape":"SkillId"}, - "max":3, - "min":0 - }, - "SipAddress":{ - "type":"structure", - "required":[ - "Uri", - "Type" - ], - "members":{ - "Uri":{ - "shape":"SipUri", - "documentation":"

The URI for the SIP address.

" - }, - "Type":{ - "shape":"SipType", - "documentation":"

The type of the SIP address.

" - } - }, - "documentation":"

The SIP address for the contact containing the URI and SIP address type.

" - }, - "SipAddressList":{ - "type":"list", - "member":{"shape":"SipAddress"}, - "max":1, - "min":0 - }, - "SipType":{ - "type":"string", - "enum":["WORK"], - "sensitive":true - }, - "SipUri":{ - "type":"string", - "max":256, - "min":1, - "pattern":"^sip[s]?:([^@:]+)\\@([^@]+)$", - "sensitive":true - }, - "SkillDetails":{ - "type":"structure", - "members":{ - "ProductDescription":{ - "shape":"ProductDescription", - "documentation":"

The description of the product.

" - }, - "InvocationPhrase":{ - "shape":"InvocationPhrase", - "documentation":"

The phrase used to trigger the skill.

" - }, - "ReleaseDate":{ - "shape":"ReleaseDate", - "documentation":"

The date when the skill was released.

" - }, - "EndUserLicenseAgreement":{ - "shape":"EndUserLicenseAgreement", - "documentation":"

The URL of the end user license agreement.

" - }, - "GenericKeywords":{ - "shape":"GenericKeywords", - "documentation":"

The generic keywords associated with the skill that can be used to find a skill.

" - }, - "BulletPoints":{ - "shape":"BulletPoints", - "documentation":"

The details about what the skill supports organized as bullet points.

" - }, - "NewInThisVersionBulletPoints":{ - "shape":"NewInThisVersionBulletPoints", - "documentation":"

The updates added in bullet points.

" - }, - "SkillTypes":{ - "shape":"SkillTypes", - "documentation":"

The types of skills.

" - }, - "Reviews":{ - "shape":"Reviews", - "documentation":"

This member has been deprecated.

The list of reviews for the skill, including Key and Value pair.

" - }, - "DeveloperInfo":{ - "shape":"DeveloperInfo", - "documentation":"

The details about the developer that published the skill.

" - } - }, - "documentation":"

Granular information about the skill.

" - }, - "SkillGroup":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of a skill group.

" - }, - "SkillGroupName":{ - "shape":"SkillGroupName", - "documentation":"

The name of a skill group.

" - }, - "Description":{ - "shape":"SkillGroupDescription", - "documentation":"

The description of a skill group.

" - } - }, - "documentation":"

A skill group with attributes.

" - }, - "SkillGroupData":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The skill group ARN of a skill group.

" - }, - "SkillGroupName":{ - "shape":"SkillGroupName", - "documentation":"

The skill group name of a skill group.

" - }, - "Description":{ - "shape":"SkillGroupDescription", - "documentation":"

The description of a skill group.

" - } - }, - "documentation":"

The attributes of a skill group.

" - }, - "SkillGroupDataList":{ - "type":"list", - "member":{"shape":"SkillGroupData"} - }, - "SkillGroupDescription":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "SkillGroupName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "SkillId":{ - "type":"string", - "pattern":"(^amzn1\\.ask\\.skill\\.[0-9a-f\\-]{1,200})|(^amzn1\\.echo-sdk-ams\\.app\\.[0-9a-f\\-]{1,200})" - }, - "SkillListMaxResults":{ - "type":"integer", - "max":10, - "min":1 - }, - "SkillName":{ - "type":"string", - "max":100, - "min":1, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "SkillNotLinkedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The skill must be linked to a third-party account.

", - "exception":true - }, - "SkillStoreType":{"type":"string"}, - "SkillSummary":{ - "type":"structure", - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of the skill summary.

" - }, - "SkillName":{ - "shape":"SkillName", - "documentation":"

The name of the skill.

" - }, - "SupportsLinking":{ - "shape":"boolean", - "documentation":"

Linking support for a skill.

" - }, - "EnablementType":{ - "shape":"EnablementType", - "documentation":"

Whether the skill is enabled under the user's account, or if it requires linking to be used.

" - }, - "SkillType":{ - "shape":"SkillType", - "documentation":"

Whether the skill is publicly available or is a private skill.

" - } - }, - "documentation":"

The summary of skills.

" - }, - "SkillSummaryList":{ - "type":"list", - "member":{"shape":"SkillSummary"} - }, - "SkillType":{ - "type":"string", - "enum":[ - "PUBLIC", - "PRIVATE" - ], - "max":100, - "min":1, - "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" - }, - "SkillTypeFilter":{ - "type":"string", - "enum":[ - "PUBLIC", - "PRIVATE", - "ALL" - ] - }, - "SkillTypes":{ - "type":"list", - "member":{"shape":"SkillStoreType"} - }, - "SkillsStoreSkill":{ - "type":"structure", - "members":{ - "SkillId":{ - "shape":"SkillId", - "documentation":"

The ARN of the skill.

" - }, - "SkillName":{ - "shape":"SkillName", - "documentation":"

The name of the skill.

" - }, - "ShortDescription":{ - "shape":"ShortDescription", - "documentation":"

Short description about the skill.

" - }, - "IconUrl":{ - "shape":"IconUrl", - "documentation":"

The URL where the skill icon resides.

" - }, - "SampleUtterances":{ - "shape":"SampleUtterances", - "documentation":"

Sample utterances that interact with the skill.

" - }, - "SkillDetails":{ - "shape":"SkillDetails", - "documentation":"

Information about the skill.

" - }, - "SupportsLinking":{ - "shape":"boolean", - "documentation":"

Linking support for a skill.

" - } - }, - "documentation":"

The detailed information about an Alexa skill.

" - }, - "SkillsStoreSkillList":{ - "type":"list", - "member":{"shape":"SkillsStoreSkill"} - }, - "SmartHomeAppliance":{ - "type":"structure", - "members":{ - "FriendlyName":{ - "shape":"ApplianceFriendlyName", - "documentation":"

The friendly name of the smart home appliance.

" - }, - "Description":{ - "shape":"ApplianceDescription", - "documentation":"

The description of the smart home appliance.

" - }, - "ManufacturerName":{ - "shape":"ApplianceManufacturerName", - "documentation":"

The name of the manufacturer of the smart home appliance.

" - } - }, - "documentation":"

A smart home appliance that can connect to a central system. Any domestic device can be a smart appliance.

" - }, - "SmartHomeApplianceList":{ - "type":"list", - "member":{"shape":"SmartHomeAppliance"} - }, - "SoftwareVersion":{"type":"string"}, - "Sort":{ - "type":"structure", - "required":[ - "Key", - "Value" - ], - "members":{ - "Key":{ - "shape":"SortKey", - "documentation":"

The sort key of a sort object.

" - }, - "Value":{ - "shape":"SortValue", - "documentation":"

The sort value of a sort object.

" - } - }, - "documentation":"

An object representing a sort criteria.

" - }, - "SortKey":{ - "type":"string", - "max":500, - "min":1 - }, - "SortList":{ - "type":"list", - "member":{"shape":"Sort"}, - "max":25 - }, - "SortValue":{ - "type":"string", - "enum":[ - "ASC", - "DESC" - ] - }, - "Ssml":{ - "type":"structure", - "required":[ - "Locale", - "Value" - ], - "members":{ - "Locale":{ - "shape":"Locale", - "documentation":"

The locale of the SSML message. Currently, en-US is supported.

" - }, - "Value":{ - "shape":"SsmlValue", - "documentation":"

The value of the SSML message in the correct SSML format. The audio tag is not supported.

" - } - }, - "documentation":"

The SSML message. For more information, see SSML Reference.

" - }, - "SsmlList":{ - "type":"list", - "member":{"shape":"Ssml"}, - "max":1 - }, - "SsmlValue":{ - "type":"string", - "max":4096, - "min":0, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "StartDeviceSyncRequest":{ - "type":"structure", - "required":["Features"], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room with which the device to sync is associated. Required.

" - }, - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device to sync. Required.

" - }, - "Features":{ - "shape":"Features", - "documentation":"

Request structure to start the device sync. Required.

" - } - } - }, - "StartDeviceSyncResponse":{ - "type":"structure", - "members":{ - } - }, - "StartSmartHomeApplianceDiscoveryRequest":{ - "type":"structure", - "required":["RoomArn"], - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The room where smart home appliance discovery was initiated.

" - } - } - }, - "StartSmartHomeApplianceDiscoveryResponse":{ - "type":"structure", - "members":{ - } - }, - "Tag":{ - "type":"structure", - "required":[ - "Key", - "Value" - ], - "members":{ - "Key":{ - "shape":"TagKey", - "documentation":"

The key of a tag. Tag keys are case-sensitive.

" - }, - "Value":{ - "shape":"TagValue", - "documentation":"

The value of a tag. Tag values are case sensitive and can be null.

" - } - }, - "documentation":"

A key-value pair that can be associated with a resource.

" - }, - "TagKey":{ - "type":"string", - "max":128, - "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" - }, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"} - }, - "TagList":{ - "type":"list", - "member":{"shape":"Tag"} - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "Arn", - "Tags" - ], - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the resource to which to add metadata tags. Required.

" - }, - "Tags":{ - "shape":"TagList", - "documentation":"

The tags to be added to the specified resource. Do not provide system tags. Required.

" - } - } - }, - "TagResourceResponse":{ - "type":"structure", - "members":{ - } - }, - "TagValue":{ - "type":"string", - "max":256, - "min":0, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" - }, - "TemperatureUnit":{ - "type":"string", - "enum":[ - "FAHRENHEIT", - "CELSIUS" - ] - }, - "Text":{ - "type":"structure", - "required":[ - "Locale", - "Value" - ], - "members":{ - "Locale":{ - "shape":"Locale", - "documentation":"

The locale of the text message. Currently, en-US is supported.

" - }, - "Value":{ - "shape":"TextValue", - "documentation":"

The value of the text message.

" - } - }, - "documentation":"

The text message.

" - }, - "TextList":{ - "type":"list", - "member":{"shape":"Text"}, - "max":1 - }, - "TextValue":{ - "type":"string", - "max":4096, - "min":0, - "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" - }, - "TimeToLiveInSeconds":{ - "type":"integer", - "max":3600, - "min":1 - }, - "Timezone":{ - "type":"string", - "max":100, - "min":1 - }, - "TotalCount":{"type":"integer"}, - "TrustAnchor":{ - "type":"string", - "pattern":"-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" - }, - "TrustAnchorList":{ - "type":"list", - "member":{"shape":"TrustAnchor"}, - "max":5, - "min":1 - }, - "UnauthorizedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ErrorMessage"} - }, - "documentation":"

The caller has no permissions to operate on the resource involved in the API call.

", - "exception":true - }, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "Arn", - "TagKeys" - ], - "members":{ - "Arn":{ - "shape":"Arn", - "documentation":"

The ARN of the resource from which to remove metadata tags. Required.

" - }, - "TagKeys":{ - "shape":"TagKeyList", - "documentation":"

The tags to be removed from the specified resource. Do not provide system tags. Required.

" - } - } - }, - "UntagResourceResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateAddressBookRequest":{ - "type":"structure", - "required":["AddressBookArn"], - "members":{ - "AddressBookArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room to update.

" - }, - "Name":{ - "shape":"AddressBookName", - "documentation":"

The updated name of the room.

" - }, - "Description":{ - "shape":"AddressBookDescription", - "documentation":"

The updated description of the room.

" - } - } - }, - "UpdateAddressBookResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateBusinessReportScheduleRequest":{ - "type":"structure", - "required":["ScheduleArn"], - "members":{ - "ScheduleArn":{ - "shape":"Arn", - "documentation":"

The ARN of the business report schedule.

" - }, - "S3BucketName":{ - "shape":"CustomerS3BucketName", - "documentation":"

The S3 location of the output reports.

" - }, - "S3KeyPrefix":{ - "shape":"S3KeyPrefix", - "documentation":"

The S3 key where the report is delivered.

" - }, - "Format":{ - "shape":"BusinessReportFormat", - "documentation":"

The format of the generated report (individual CSV files or zipped files of individual files).

" - }, - "ScheduleName":{ - "shape":"BusinessReportScheduleName", - "documentation":"

The name identifier of the schedule.

" - }, - "Recurrence":{ - "shape":"BusinessReportRecurrence", - "documentation":"

The recurrence of the reports.

" - } - } - }, - "UpdateBusinessReportScheduleResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateConferenceProviderRequest":{ - "type":"structure", - "required":[ - "ConferenceProviderArn", - "ConferenceProviderType", - "MeetingSetting" - ], - "members":{ - "ConferenceProviderArn":{ - "shape":"Arn", - "documentation":"

The ARN of the conference provider.

" - }, - "ConferenceProviderType":{ - "shape":"ConferenceProviderType", - "documentation":"

The type of the conference provider.

" - }, - "IPDialIn":{ - "shape":"IPDialIn", - "documentation":"

The IP endpoint and protocol for calling.

" - }, - "PSTNDialIn":{ - "shape":"PSTNDialIn", - "documentation":"

The information for PSTN conferencing.

" - }, - "MeetingSetting":{ - "shape":"MeetingSetting", - "documentation":"

The meeting settings for the conference provider.

" - } - } - }, - "UpdateConferenceProviderResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateContactRequest":{ - "type":"structure", - "required":["ContactArn"], - "members":{ - "ContactArn":{ - "shape":"Arn", - "documentation":"

The ARN of the contact to update.

" - }, - "DisplayName":{ - "shape":"ContactName", - "documentation":"

The updated display name of the contact.

" - }, - "FirstName":{ - "shape":"ContactName", - "documentation":"

The updated first name of the contact.

" - }, - "LastName":{ - "shape":"ContactName", - "documentation":"

The updated last name of the contact.

" - }, - "PhoneNumber":{ - "shape":"RawPhoneNumber", - "documentation":"

The updated phone number of the contact. The phone number type defaults to WORK. You can either specify PhoneNumber or PhoneNumbers. We recommend that you use PhoneNumbers, which lets you specify the phone number type and multiple numbers.

" - }, - "PhoneNumbers":{ - "shape":"PhoneNumberList", - "documentation":"

The list of phone numbers for the contact.

" - }, - "SipAddresses":{ - "shape":"SipAddressList", - "documentation":"

The list of SIP addresses for the contact.

" - } - } - }, - "UpdateContactResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateDeviceRequest":{ - "type":"structure", - "members":{ - "DeviceArn":{ - "shape":"Arn", - "documentation":"

The ARN of the device to update. Required.

" - }, - "DeviceName":{ - "shape":"DeviceName", - "documentation":"

The updated device name. Required.

" - } - } - }, - "UpdateDeviceResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateEndOfMeetingReminder":{ - "type":"structure", - "members":{ - "ReminderAtMinutes":{ - "shape":"EndOfMeetingReminderMinutesList", - "documentation":"

Updates settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "ReminderType":{ - "shape":"EndOfMeetingReminderType", - "documentation":"

The type of sound that users hear during the end of meeting reminder.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether an end of meeting reminder is enabled or not.

" - } - }, - "documentation":"

Settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "UpdateGatewayGroupRequest":{ - "type":"structure", - "required":["GatewayGroupArn"], - "members":{ - "GatewayGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway group to update.

" - }, - "Name":{ - "shape":"GatewayGroupName", - "documentation":"

The updated name of the gateway group.

" - }, - "Description":{ - "shape":"GatewayGroupDescription", - "documentation":"

The updated description of the gateway group.

" - } - } - }, - "UpdateGatewayGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateGatewayRequest":{ - "type":"structure", - "required":["GatewayArn"], - "members":{ - "GatewayArn":{ - "shape":"Arn", - "documentation":"

The ARN of the gateway to update.

" - }, - "Name":{ - "shape":"GatewayName", - "documentation":"

The updated name of the gateway.

" - }, - "Description":{ - "shape":"GatewayDescription", - "documentation":"

The updated description of the gateway.

" - }, - "SoftwareVersion":{ - "shape":"GatewayVersion", - "documentation":"

The updated software version of the gateway. The gateway automatically updates its software version during normal operation.

" - } - } - }, - "UpdateGatewayResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateInstantBooking":{ - "type":"structure", - "members":{ - "DurationInMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 15 and 240 minutes at increments of 15 that determines how long to book an available room when a meeting is started with Alexa.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether instant booking is enabled or not.

" - } - }, - "documentation":"

Updates settings for the instant booking feature that are applied to a room profile. If instant booking is enabled, Alexa automatically reserves a room if it is free when a user joins a meeting with Alexa.

" - }, - "UpdateMeetingRoomConfiguration":{ - "type":"structure", - "members":{ - "RoomUtilizationMetricsEnabled":{ - "shape":"Boolean", - "documentation":"

Whether room utilization metrics are enabled or not.

" - }, - "EndOfMeetingReminder":{ - "shape":"UpdateEndOfMeetingReminder", - "documentation":"

Settings for the end of meeting reminder feature that are applied to a room profile. The end of meeting reminder enables Alexa to remind users when a meeting is ending.

" - }, - "InstantBooking":{ - "shape":"UpdateInstantBooking", - "documentation":"

Settings to automatically book an available room available for a configured duration when joining a meeting with Alexa.

" - }, - "RequireCheckIn":{ - "shape":"UpdateRequireCheckIn", - "documentation":"

Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into to make the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

" - }, - "ProactiveJoin":{"shape":"UpdateProactiveJoin"} - }, - "documentation":"

Updates meeting room settings of a room profile.

" - }, - "UpdateNetworkProfileRequest":{ - "type":"structure", - "required":["NetworkProfileArn"], - "members":{ - "NetworkProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the network profile associated with a device.

" - }, - "NetworkProfileName":{ - "shape":"NetworkProfileName", - "documentation":"

The name of the network profile associated with a device.

" - }, - "Description":{ - "shape":"NetworkProfileDescription", - "documentation":"

Detailed information about a device's network profile.

" - }, - "CurrentPassword":{ - "shape":"CurrentWiFiPassword", - "documentation":"

The current password of the Wi-Fi network.

" - }, - "NextPassword":{ - "shape":"NextWiFiPassword", - "documentation":"

The next, or subsequent, password of the Wi-Fi network. This password is asynchronously transmitted to the device and is used when the password of the network changes to NextPassword.

" - }, - "CertificateAuthorityArn":{ - "shape":"Arn", - "documentation":"

The ARN of the Private Certificate Authority (PCA) created in AWS Certificate Manager (ACM). This is used to issue certificates to the devices.

" - }, - "TrustAnchors":{ - "shape":"TrustAnchorList", - "documentation":"

The root certificate(s) of your authentication server that will be installed on your devices and used to trust your authentication server during EAP negotiation.

" - } - } - }, - "UpdateNetworkProfileResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateProactiveJoin":{ - "type":"structure", - "required":["EnabledByMotion"], - "members":{ - "EnabledByMotion":{"shape":"Boolean"} - } - }, - "UpdateProfileRequest":{ - "type":"structure", - "members":{ - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room profile to update. Required.

" - }, - "ProfileName":{ - "shape":"ProfileName", - "documentation":"

The updated name for the room profile.

" - }, - "IsDefault":{ - "shape":"Boolean", - "documentation":"

Sets the profile as default if selected. If this is missing, no update is done to the default status.

" - }, - "Timezone":{ - "shape":"Timezone", - "documentation":"

The updated timezone for the room profile.

" - }, - "Address":{ - "shape":"Address", - "documentation":"

The updated address for the room profile.

" - }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The updated distance unit for the room profile.

" - }, - "TemperatureUnit":{ - "shape":"TemperatureUnit", - "documentation":"

The updated temperature unit for the room profile.

" - }, - "WakeWord":{ - "shape":"WakeWord", - "documentation":"

The updated wake word for the room profile.

" - }, - "Locale":{ - "shape":"DeviceLocale", - "documentation":"

The updated locale for the room profile. (This is currently only available to a limited preview audience.)

" - }, - "SetupModeDisabled":{ - "shape":"Boolean", - "documentation":"

Whether the setup mode of the profile is enabled.

" - }, - "MaxVolumeLimit":{ - "shape":"MaxVolumeLimit", - "documentation":"

The updated maximum volume limit for the room profile.

" - }, - "PSTNEnabled":{ - "shape":"Boolean", - "documentation":"

Whether the PSTN setting of the room profile is enabled.

" - }, - "DataRetentionOptIn":{ - "shape":"Boolean", - "documentation":"

Whether data retention of the profile is enabled.

" - }, - "MeetingRoomConfiguration":{ - "shape":"UpdateMeetingRoomConfiguration", - "documentation":"

The updated meeting room settings of a room profile.

" - } - } - }, - "UpdateProfileResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateRequireCheckIn":{ - "type":"structure", - "members":{ - "ReleaseAfterMinutes":{ - "shape":"Minutes", - "documentation":"

Duration between 5 and 20 minutes to determine when to release the room if it's not checked into.

" - }, - "Enabled":{ - "shape":"Boolean", - "documentation":"

Whether require check in is enabled or not.

" - } - }, - "documentation":"

Updates settings for the require check in feature that are applied to a room profile. Require check in allows a meeting room’s Alexa or AVS device to prompt the user to check in; otherwise, the room will be released.

" - }, - "UpdateRoomRequest":{ - "type":"structure", - "members":{ - "RoomArn":{ - "shape":"Arn", - "documentation":"

The ARN of the room to update.

" - }, - "RoomName":{ - "shape":"RoomName", - "documentation":"

The updated name for the room.

" - }, - "Description":{ - "shape":"RoomDescription", - "documentation":"

The updated description for the room.

" - }, - "ProviderCalendarId":{ - "shape":"ProviderCalendarId", - "documentation":"

The updated provider calendar ARN for the room.

" - }, - "ProfileArn":{ - "shape":"Arn", - "documentation":"

The updated profile ARN for the room.

" - } - } - }, - "UpdateRoomResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateSkillGroupRequest":{ - "type":"structure", - "members":{ - "SkillGroupArn":{ - "shape":"Arn", - "documentation":"

The ARN of the skill group to update.

" - }, - "SkillGroupName":{ - "shape":"SkillGroupName", - "documentation":"

The updated name for the skill group.

" - }, - "Description":{ - "shape":"SkillGroupDescription", - "documentation":"

The updated description for the skill group.

" - } - } - }, - "UpdateSkillGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "Url":{"type":"string"}, - "UserCode":{ - "type":"string", - "max":128, - "min":1 - }, - "UserData":{ - "type":"structure", - "members":{ - "UserArn":{ - "shape":"Arn", - "documentation":"

The ARN of a user.

" - }, - "FirstName":{ - "shape":"user_FirstName", - "documentation":"

The first name of a user.

" - }, - "LastName":{ - "shape":"user_LastName", - "documentation":"

The last name of a user.

" - }, - "Email":{ - "shape":"Email", - "documentation":"

The email of a user.

" - }, - "EnrollmentStatus":{ - "shape":"EnrollmentStatus", - "documentation":"

The enrollment status of a user.

" - }, - "EnrollmentId":{ - "shape":"EnrollmentId", - "documentation":"

The enrollment ARN of a user.

" - } - }, - "documentation":"

Information related to a user.

" - }, - "UserDataList":{ - "type":"list", - "member":{"shape":"UserData"} - }, - "UserId":{ - "type":"string", - "pattern":"amzn1\\.[A-Za-z0-9+-\\/=.]{1,300}" - }, - "Utterance":{"type":"string"}, - "Value":{ - "type":"string", - "min":1 - }, - "WakeWord":{ - "type":"string", - "enum":[ - "ALEXA", - "AMAZON", - "ECHO", - "COMPUTER" - ] - }, - "boolean":{"type":"boolean"}, - "user_FirstName":{ - "type":"string", - "max":30, - "min":0, - "pattern":"([A-Za-z\\-' 0-9._]|\\p{IsLetter})*" - }, - "user_LastName":{ - "type":"string", - "max":30, - "min":0, - "pattern":"([A-Za-z\\-' 0-9._]|\\p{IsLetter})*" - }, - "user_UserId":{ - "type":"string", - "max":128, - "min":1, - "pattern":"[a-zA-Z0-9@_+.-]*" - } - }, - "documentation":"

Alexa for Business has been retired and is no longer supported.

", - "deprecated":true, - "deprecatedMessage":"Alexa For Business is no longer supported" -} diff --git a/botocore/data/amplify/2017-07-25/service-2.json b/botocore/data/amplify/2017-07-25/service-2.json index 12ae52f822..7037a9f5be 100644 --- a/botocore/data/amplify/2017-07-25/service-2.json +++ b/botocore/data/amplify/2017-07-25/service-2.json @@ -11,7 +11,8 @@ "serviceId":"Amplify", "signatureVersion":"v4", "signingName":"amplify", - "uid":"amplify-2017-07-25" + "uid":"amplify-2017-07-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateApp":{ @@ -46,7 +47,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a new backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" + "documentation":"

Creates a new backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "CreateBranch":{ "name":"CreateBranch", @@ -150,7 +151,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Deletes a backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" + "documentation":"

Deletes a backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "DeleteBranch":{ "name":"DeleteBranch", @@ -283,7 +284,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Returns a backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" + "documentation":"

Returns a backend environment for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "GetBranch":{ "name":"GetBranch", @@ -395,7 +396,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists the backend environments for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" + "documentation":"

Lists the backend environments for an Amplify app.

This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "ListBranches":{ "name":"ListBranches", @@ -747,6 +748,10 @@ "repositoryCloneMethod":{ "shape":"RepositoryCloneMethod", "documentation":"

This is for internal use.

The Amplify service uses this parameter to specify the authentication protocol to use to access the Git repository for an Amplify app. Amplify specifies TOKEN for a GitHub repository, SIGV4 for an Amazon Web Services CodeCommit repository, and SSH for GitLab and Bitbucket repositories.

" + }, + "cacheConfig":{ + "shape":"CacheConfig", + "documentation":"

The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting.

" } }, "documentation":"

Represents the different branches of a repository for building, deploying, and hosting an Amplify app.

" @@ -892,7 +897,7 @@ "documentation":"

The Amazon Resource Name (ARN) for the CloudFormation stack.

" } }, - "documentation":"

Describes the backend properties associated with an Amplify Branch.

" + "documentation":"

Describes the backend associated with an Amplify Branch.

This property is available to Amplify Gen 2 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "BackendEnvironment":{ "type":"structure", @@ -928,7 +933,7 @@ "documentation":"

The last updated date and time for a backend environment that is part of an Amplify app.

" } }, - "documentation":"

Describes the backend environment for an Amplify app.

" + "documentation":"

Describes the backend environment associated with a Branch of a Gen 1 Amplify app. Amplify Gen 1 applications are created using Amplify Studio or the Amplify command line interface (CLI).

" }, "BackendEnvironmentArn":{ "type":"string", @@ -1083,7 +1088,7 @@ }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

This property is available to Amplify Gen 1 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" }, "backend":{"shape":"Backend"} }, @@ -1113,13 +1118,31 @@ "pattern":"(?s).+", "sensitive":true }, + "CacheConfig":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"CacheConfigType", + "documentation":"

The type of cache configuration to use for an Amplify app.

The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting.

The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key.

" + } + }, + "documentation":"

Describes the cache configuration for an Amplify app.

For more information about how Amplify applies an optimal cache configuration for your app based on the type of content that is being served, see Managing cache configuration in the Amplify User guide.

" + }, + "CacheConfigType":{ + "type":"string", + "enum":[ + "AMPLIFY_MANAGED", + "AMPLIFY_MANAGED_NO_COOKIES" + ] + }, "Certificate":{ "type":"structure", "required":["type"], "members":{ "type":{ "shape":"CertificateType", - "documentation":"

The type of SSL/TLS certificate that you want to use.

Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide .

" + "documentation":"

The type of SSL/TLS certificate that you want to use.

Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide.

" }, "customCertificateArn":{ "shape":"CertificateArn", @@ -1262,6 +1285,10 @@ "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", "documentation":"

The automated branch creation configuration for an Amplify app.

" + }, + "cacheConfig":{ + "shape":"CacheConfig", + "documentation":"

The cache configuration for the Amplify app.

" } }, "documentation":"

The request structure used to create apps in Amplify.

" @@ -1391,11 +1418,11 @@ }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify app.

This field is available to Amplify Gen 1 apps only where the backend is created using Amplify Studio or the Amplify command line interface (CLI).

" }, "backend":{ "shape":"Backend", - "documentation":"

The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack.

" + "documentation":"

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

This field is available to Amplify Gen 2 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" } }, "documentation":"

The request structure for the create branch request.

" @@ -3275,6 +3302,10 @@ "accessToken":{ "shape":"AccessToken", "documentation":"

The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.

Use accessToken for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use oauthToken.

You must specify either accessToken or oauthToken when you update an app.

Existing Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide .

" + }, + "cacheConfig":{ + "shape":"CacheConfig", + "documentation":"

The cache configuration for the Amplify app.

" } }, "documentation":"

The request structure for the update app request.

" @@ -3367,11 +3398,11 @@ }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify app.

This field is available to Amplify Gen 1 apps only where the backend is created using Amplify Studio or the Amplify command line interface (CLI).

" }, "backend":{ "shape":"Backend", - "documentation":"

The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack.

" + "documentation":"

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

This field is available to Amplify Gen 2 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code.

" } }, "documentation":"

The request structure for the update branch request.

" diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index 626f33d5a4..a86e1c4745 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2015-07-09", "endpointPrefix":"apigateway", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon API Gateway", "serviceId":"API Gateway", "signatureVersion":"v4", - "uid":"apigateway-2015-07-09" + "uid":"apigateway-2015-07-09", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateApiKey":{ diff --git a/botocore/data/appintegrations/2020-07-29/service-2.json b/botocore/data/appintegrations/2020-07-29/service-2.json index 0a219e3f02..b2e80aeb7e 100644 --- a/botocore/data/appintegrations/2020-07-29/service-2.json +++ b/botocore/data/appintegrations/2020-07-29/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"app-integrations", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon AppIntegrations Service", "serviceId":"AppIntegrations", "signatureVersion":"v4", "signingName":"app-integrations", - "uid":"appintegrations-2020-07-29" + "uid":"appintegrations-2020-07-29", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateApplication":{ @@ -29,7 +31,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

This API is in preview release and subject to change.

Creates and persists an Application resource.

" + "documentation":"

Creates and persists an Application resource.

" }, "CreateDataIntegration":{ "name":"CreateDataIntegration", @@ -49,6 +51,24 @@ ], "documentation":"

Creates and persists a DataIntegration resource.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" }, + "CreateDataIntegrationAssociation":{ + "name":"CreateDataIntegrationAssociation", + "http":{ + "method":"POST", + "requestUri":"/dataIntegrations/{Identifier}/associations" + }, + "input":{"shape":"CreateDataIntegrationAssociationRequest"}, + "output":{"shape":"CreateDataIntegrationAssociationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ResourceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates and persists a DataIntegrationAssociation resource.

" + }, "CreateEventIntegration":{ "name":"CreateEventIntegration", "http":{ @@ -133,7 +153,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

This API is in preview release and subject to change.

Get an Application resource.

" + "documentation":"

Get an Application resource.

" }, "GetDataIntegration":{ "name":"GetDataIntegration", @@ -200,7 +220,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

This API is in preview release and subject to change.

Lists applications in the account.

" + "documentation":"

Lists applications in the account.

" }, "ListDataIntegrationAssociations":{ "name":"ListDataIntegrationAssociations", @@ -332,7 +352,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

This API is in preview release and subject to change.

Updates and persists an Application resource.

" + "documentation":"

Updates and persists an Application resource.

" }, "UpdateDataIntegration":{ "name":"UpdateDataIntegration", @@ -351,6 +371,23 @@ ], "documentation":"

Updates the description of a DataIntegration.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" }, + "UpdateDataIntegrationAssociation":{ + "name":"UpdateDataIntegrationAssociation", + "http":{ + "method":"PATCH", + "requestUri":"/dataIntegrations/{Identifier}/associations/{DataIntegrationAssociationIdentifier}" + }, + "input":{"shape":"UpdateDataIntegrationAssociationRequest"}, + "output":{"shape":"UpdateDataIntegrationAssociationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates and persists a DataIntegrationAssociation resource.

Updating a DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job.

" + }, "UpdateEventIntegration":{ "name":"UpdateEventIntegration", "http":{ @@ -560,12 +597,58 @@ } } }, + "CreateDataIntegrationAssociationRequest":{ + "type":"structure", + "required":["DataIntegrationIdentifier"], + "members":{ + "DataIntegrationIdentifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier for the DataIntegration.

", + "location":"uri", + "locationName":"Identifier" + }, + "ClientId":{ + "shape":"ClientId", + "documentation":"

The identifier for the client that is associated with the DataIntegration association.

" + }, + "ObjectConfiguration":{"shape":"ObjectConfiguration"}, + "DestinationURI":{ + "shape":"DestinationURI", + "documentation":"

The URI of the data destination.

" + }, + "ClientAssociationMetadata":{ + "shape":"ClientAssociationMetadata", + "documentation":"

The mapping of metadata to be extracted from the data.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.

", + "idempotencyToken":true + }, + "ExecutionConfiguration":{ + "shape":"ExecutionConfiguration", + "documentation":"

The configuration for how the files should be pulled from the source.

" + } + } + }, + "CreateDataIntegrationAssociationResponse":{ + "type":"structure", + "members":{ + "DataIntegrationAssociationId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the DataIntegrationAssociation.

" + }, + "DataIntegrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the DataIntegration.

" + } + } + }, "CreateDataIntegrationRequest":{ "type":"structure", "required":[ "Name", - "KmsKey", - "SourceURI" + "KmsKey" ], "members":{ "Name":{ @@ -578,7 +661,7 @@ }, "KmsKey":{ "shape":"NonBlankString", - "documentation":"

The KMS key for the DataIntegration.

" + "documentation":"

The KMS key ARN for the DataIntegration.

" }, "SourceURI":{ "shape":"SourceURI", @@ -628,7 +711,7 @@ }, "KmsKey":{ "shape":"NonBlankString", - "documentation":"

The KMS key for the DataIntegration.

" + "documentation":"

The KMS key ARN for the DataIntegration.

" }, "SourceURI":{ "shape":"SourceURI", @@ -714,7 +797,16 @@ "ClientId":{ "shape":"ClientId", "documentation":"

The identifier for the client that is associated with the DataIntegration association.

" - } + }, + "DestinationURI":{ + "shape":"DestinationURI", + "documentation":"

The URI of the data destination.

" + }, + "LastExecutionStatus":{ + "shape":"LastExecutionStatus", + "documentation":"

The execution status of the last job.

" + }, + "ExecutionConfiguration":{"shape":"ExecutionConfiguration"} }, "documentation":"

Summary information about the DataIntegration association.

" }, @@ -805,6 +897,12 @@ "min":0, "pattern":".*" }, + "DestinationURI":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+$)|(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+[\\w/!@#+=.-]+[\\w/!@#+=.,-]+$)" + }, "DuplicateResourceException":{ "type":"structure", "members":{ @@ -921,6 +1019,34 @@ "min":1, "pattern":"^[a-zA-Z0-9\\/\\._\\-]+::[a-zA-Z0-9\\/\\._\\-]+(?:\\*)?$" }, + "ExecutionConfiguration":{ + "type":"structure", + "required":["ExecutionMode"], + "members":{ + "ExecutionMode":{ + "shape":"ExecutionMode", + "documentation":"

The mode for data import/export execution.

" + }, + "OnDemandConfiguration":{"shape":"OnDemandConfiguration"}, + "ScheduleConfiguration":{"shape":"ScheduleConfiguration"} + }, + "documentation":"

The configuration for how the files should be pulled from the source.

" + }, + "ExecutionMode":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SCHEDULED" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "IN_PROGRESS", + "FAILED" + ] + }, "ExternalUrlConfig":{ "type":"structure", "required":["AccessUrl"], @@ -1072,11 +1198,11 @@ }, "Description":{ "shape":"Description", - "documentation":"

The KMS key for the DataIntegration.

" + "documentation":"

The KMS key ARN for the DataIntegration.

" }, "KmsKey":{ "shape":"NonBlankString", - "documentation":"

The KMS key for the DataIntegration.

" + "documentation":"

The KMS key ARN for the DataIntegration.

" }, "SourceURI":{ "shape":"SourceURI", @@ -1172,6 +1298,20 @@ "error":{"httpStatusCode":400}, "exception":true }, + "LastExecutionStatus":{ + "type":"structure", + "members":{ + "ExecutionStatus":{ + "shape":"ExecutionStatus", + "documentation":"

The job status enum string.

" + }, + "StatusMessage":{ + "shape":"NonBlankString", + "documentation":"

The status message of a job.

" + } + }, + "documentation":"

The execution status of the last job.

" + }, "ListApplicationAssociationsRequest":{ "type":"structure", "required":["ApplicationId"], @@ -1436,6 +1576,21 @@ "value":{"shape":"FieldsMap"}, "documentation":"

The configuration for what data should be pulled from the source.

" }, + "OnDemandConfiguration":{ + "type":"structure", + "required":["StartTime"], + "members":{ + "StartTime":{ + "shape":"NonBlankString", + "documentation":"

The start time for data pull from the source as a Unix/epoch string in milliseconds.

" + }, + "EndTime":{ + "shape":"NonBlankString", + "documentation":"

The end time for data pull from the source as a Unix/epoch string in milliseconds.

" + } + }, + "documentation":"

The start and end time for data pull from the source.

" + }, "Permission":{ "type":"string", "documentation":"

The permission of an event or request that the application has access to.

", @@ -1694,6 +1849,37 @@ "members":{ } }, + "UpdateDataIntegrationAssociationRequest":{ + "type":"structure", + "required":[ + "DataIntegrationIdentifier", + "DataIntegrationAssociationIdentifier", + "ExecutionConfiguration" + ], + "members":{ + "DataIntegrationIdentifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier for the DataIntegration.

", + "location":"uri", + "locationName":"Identifier" + }, + "DataIntegrationAssociationIdentifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier of the DataIntegrationAssociation resource.

", + "location":"uri", + "locationName":"DataIntegrationAssociationIdentifier" + }, + "ExecutionConfiguration":{ + "shape":"ExecutionConfiguration", + "documentation":"

The configuration for how the files should be pulled from the source.

" + } + } + }, + "UpdateDataIntegrationAssociationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateDataIntegrationRequest":{ "type":"structure", "required":["Identifier"], @@ -1741,5 +1927,5 @@ } } }, - "documentation":"

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations and Deliver information to agents using Amazon Connect Wisdom in the Amazon Connect Administrator Guide.

" + "documentation":"

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see the following topics in the Amazon Connect Administrator Guide:

" } diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index cd24f26ca5..3c0ed37529 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"application-autoscaling", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Application Auto Scaling", "serviceId":"Application Auto Scaling", "signatureVersion":"v4", "signingName":"application-autoscaling", "targetPrefix":"AnyScaleFrontendService", - "uid":"application-autoscaling-2016-02-06" + "uid":"application-autoscaling-2016-02-06", + "auth":["aws.auth#sigv4"] }, "operations":{ "DeleteScalingPolicy":{ @@ -124,7 +126,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled scaling and Managing scheduled scaling in the Application Auto Scaling User Guide.

" + "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled scaling in the Application Auto Scaling User Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -137,7 +139,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns all the tags on the specified Application Auto Scaling scalable target.

For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

" + "documentation":"

Returns all the tags on the specified Application Auto Scaling scalable target.

For general information about tags, including the format and syntax, see Tagging your Amazon Web Services resources in the Amazon Web Services General Reference.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -203,7 +205,7 @@ {"shape":"TooManyTagsException"}, {"shape":"ValidationException"} ], - "documentation":"

Adds or edits tags on an Application Auto Scaling scalable target.

Each tag consists of a tag key and a tag value, which are both case-sensitive strings. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action.

You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget).

For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

Use tags to control access to a scalable target. For more information, see Tagging support for Application Auto Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

Adds or edits tags on an Application Auto Scaling scalable target.

Each tag consists of a tag key and a tag value, which are both case-sensitive strings. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action.

You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget).

For general information about tags, including the format and syntax, see Tagging your Amazon Web Services resources in the Amazon Web Services General Reference.

Use tags to control access to a scalable target. For more information, see Tagging support for Application Auto Scaling in the Application Auto Scaling User Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -315,11 +317,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" } } }, @@ -347,11 +349,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" } } }, @@ -374,11 +376,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" } } }, @@ -397,11 +399,11 @@ }, "ResourceIds":{ "shape":"ResourceIdsMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MaxResults":{ "shape":"MaxResults", @@ -436,11 +438,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MaxResults":{ "shape":"MaxResults", @@ -483,11 +485,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MaxResults":{ "shape":"MaxResults", @@ -526,11 +528,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MaxResults":{ "shape":"MaxResults", @@ -694,7 +696,10 @@ "NeptuneReaderAverageCPUUtilization", "SageMakerVariantProvisionedConcurrencyUtilization", "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage", - "SageMakerInferenceComponentInvocationsPerCopy" + "SageMakerInferenceComponentInvocationsPerCopy", + "WorkSpacesAverageUserSessionsCapacityUtilization", + "SageMakerInferenceComponentConcurrentRequestsPerCopyHighResolution", + "SageMakerVariantConcurrentRequestsPerModelHighResolution" ] }, "MetricUnit":{"type":"string"}, @@ -720,7 +725,7 @@ "documentation":"

The current capacity.

" } }, - "documentation":"

Describes the reason for an activity that isn't scaled (not scaled activity), in machine-readable format. For help interpreting the not scaled reason details, see Scaling activities for Application Auto Scaling.

" + "documentation":"

Describes the reason for an activity that isn't scaled (not scaled activity), in machine-readable format. For help interpreting the not scaled reason details, see Scaling activities for Application Auto Scaling in the Application Auto Scaling User Guide.

" }, "NotScaledReasons":{ "type":"list", @@ -760,7 +765,7 @@ "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot Fleet or ECS service.

You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). The format of the resource label is:

app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff.

Where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.

" } }, - "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Monitor your resources using CloudWatch in the Application Auto Scaling User Guide.

" + "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

For more information, see Predefined metrics for target tracking scaling policies in the Application Auto Scaling User Guide.

" }, "PutScalingPolicyRequest":{ "type":"structure", @@ -781,11 +786,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "PolicyType":{ "shape":"PolicyType", @@ -830,7 +835,7 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information, see Schedule recurring scaling actions using cron expressions in the Application Auto Scaling User Guide.

" }, "Timezone":{ "shape":"ResourceIdMaxLen1600", @@ -842,11 +847,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "StartTime":{ "shape":"TimestampType", @@ -881,15 +886,15 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

For the following resources, the minimum value allowed is 0.

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • ECS services

  • EMR clusters

  • Lambda provisioned concurrency

  • SageMaker endpoint variants

  • SageMaker Serverless endpoint provisioned concurrency

  • Spot Fleets

  • custom resources

It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In which case, the error message will provide the minimum value that the resource can accept.

" + "documentation":"

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

For the following resources, the minimum value allowed is 0.

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • ECS services

  • EMR clusters

  • Lambda provisioned concurrency

  • SageMaker endpoint variants

  • SageMaker inference components

  • SageMaker serverless endpoint provisioned concurrency

  • Spot Fleets

  • custom resources

It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In this case, the error message provides the minimum value that the resource can accept.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", @@ -897,11 +902,11 @@ }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM roles.

" + "documentation":"

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see How Application Auto Scaling works with IAM.

" }, "SuspendedState":{ "shape":"SuspendedState", - "documentation":"

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

  • For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended.

  • For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended.

  • For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended.

For more information, see Suspending and resuming scaling in the Application Auto Scaling User Guide.

" + "documentation":"

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

  • For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended.

  • For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended.

  • For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended.

For more information, see Suspend and resume scaling in the Application Auto Scaling User Guide.

" }, "Tags":{ "shape":"TagMap", @@ -976,7 +981,8 @@ "elasticache:replication-group:Replicas", "neptune:cluster:ReadReplicaCount", "sagemaker:variant:DesiredProvisionedConcurrency", - "sagemaker:inference-component:DesiredCopyCount" + "sagemaker:inference-component:DesiredCopyCount", + "workspaces:workspacespool:DesiredUserSessions" ] }, "ScalableTarget":{ @@ -997,11 +1003,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "MinCapacity":{ "shape":"ResourceCapacity", @@ -1075,11 +1081,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "Description":{ "shape":"XmlString", @@ -1158,11 +1164,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "PolicyType":{ "shape":"PolicyType", @@ -1213,7 +1219,7 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information, see Schedule recurring scaling actions using cron expressions in the Application Auto Scaling User Guide.

" }, "Timezone":{ "shape":"ResourceIdMaxLen1600", @@ -1221,11 +1227,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.

  • Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

  • Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster.

  • Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.

  • SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: inference-component/my-inference-component.

  • Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. Example: workspacespool/wspool-123456.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker Serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The task count of an ECS service.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.

  • appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

  • kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.

  • elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.

  • elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.

  • neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.

  • sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.

  • sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.

  • workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.

" }, "StartTime":{ "shape":"TimestampType", @@ -1272,7 +1278,8 @@ "cassandra", "kafka", "elasticache", - "neptune" + "neptune", + "workspaces" ] }, "StepAdjustment":{ @@ -1371,7 +1378,7 @@ }, "Tags":{ "shape":"TagMap", - "documentation":"

The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource.

Each tag consists of a tag key and a tag value.

You cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key. If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one.

For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services Billing and Cost Management User Guide.

" + "documentation":"

The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource.

Each tag consists of a tag key and a tag value.

You cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key. If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one.

For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services Billing User Guide.

" } } }, @@ -1500,7 +1507,7 @@ "documentation":"

The unit to use for the returned data points. For a complete list of the units that CloudWatch supports, see the MetricDatum data type in the Amazon CloudWatch API Reference.

" } }, - "documentation":"

This structure defines the CloudWatch metric to return, along with the statistic, period, and unit.

For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.

" + "documentation":"

This structure defines the CloudWatch metric to return, along with the statistic and unit.

For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.

" }, "TargetTrackingMetricUnit":{ "type":"string", @@ -1587,5 +1594,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon AppStream 2.0 fleets

  • Amazon Aurora Replicas

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon ECS services

  • Amazon ElastiCache for Redis clusters (replication groups)

  • Amazon EMR clusters

  • Amazon Keyspaces (for Apache Cassandra) tables

  • Lambda function provisioned concurrency

  • Amazon Managed Streaming for Apache Kafka broker storage

  • Amazon Neptune clusters

  • Amazon SageMaker endpoint variants

  • Amazon SageMaker Serverless endpoint provisioned concurrency

  • Amazon SageMaker inference components

  • Spot Fleets (Amazon EC2)

  • Custom resources provided by your own applications or services

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register Amazon Web Services or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon AppStream 2.0 fleets

  • Amazon Aurora Replicas

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon ECS services

  • Amazon ElastiCache for Redis clusters (replication groups)

  • Amazon EMR clusters

  • Amazon Keyspaces (for Apache Cassandra) tables

  • Lambda function provisioned concurrency

  • Amazon Managed Streaming for Apache Kafka broker storage

  • Amazon Neptune clusters

  • Amazon SageMaker endpoint variants

  • Amazon SageMaker inference components

  • Amazon SageMaker serverless endpoint provisioned concurrency

  • Spot Fleets (Amazon EC2)

  • Pool of WorkSpaces

  • Custom resources provided by your own applications or services

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register Amazon Web Services or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

" } diff --git a/botocore/data/application-signals/2024-04-15/endpoint-rule-set-1.json b/botocore/data/application-signals/2024-04-15/endpoint-rule-set-1.json new file mode 100644 index 0000000000..7ec23c86b6 --- /dev/null +++ b/botocore/data/application-signals/2024-04-15/endpoint-rule-set-1.json @@ -0,0 +1,137 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": 
"https://application-signals-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://application-signals.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/application-signals/2024-04-15/paginators-1.json b/botocore/data/application-signals/2024-04-15/paginators-1.json new file mode 100644 index 0000000000..f549f15d7f --- /dev/null +++ b/botocore/data/application-signals/2024-04-15/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListServiceDependencies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServiceDependencies" + }, + "ListServiceDependents": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServiceDependents" + }, + "ListServiceLevelObjectives": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SloSummaries" + }, + "ListServiceOperations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServiceOperations" + }, + "ListServices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServiceSummaries" + } + } +} diff --git a/botocore/data/application-signals/2024-04-15/paginators-1.sdk-extras.json b/botocore/data/application-signals/2024-04-15/paginators-1.sdk-extras.json new file mode 100644 index 0000000000..d6fcd51478 --- /dev/null +++ b/botocore/data/application-signals/2024-04-15/paginators-1.sdk-extras.json @@ -0,0 +1,31 @@ +{ + 
"version": 1.0, + "merge": { + "pagination": { + "ListServiceDependencies": { + "non_aggregate_keys": [ + "StartTime", + "EndTime" + ] + }, + "ListServiceDependents": { + "non_aggregate_keys": [ + "StartTime", + "EndTime" + ] + }, + "ListServiceOperations": { + "non_aggregate_keys": [ + "StartTime", + "EndTime" + ] + }, + "ListServices": { + "non_aggregate_keys": [ + "StartTime", + "EndTime" + ] + } + } + } + } diff --git a/botocore/data/application-signals/2024-04-15/service-2.json b/botocore/data/application-signals/2024-04-15/service-2.json new file mode 100644 index 0000000000..37a930da0f --- /dev/null +++ b/botocore/data/application-signals/2024-04-15/service-2.json @@ -0,0 +1,1824 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-04-15", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"application-signals", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"Amazon CloudWatch Application Signals", + "serviceId":"Application Signals", + "signatureVersion":"v4", + "signingName":"application-signals", + "uid":"application-signals-2024-04-15" + }, + "operations":{ + "BatchGetServiceLevelObjectiveBudgetReport":{ + "name":"BatchGetServiceLevelObjectiveBudgetReport", + "http":{ + "method":"POST", + "requestUri":"/budget-report", + "responseCode":200 + }, + "input":{"shape":"BatchGetServiceLevelObjectiveBudgetReportInput"}, + "output":{"shape":"BatchGetServiceLevelObjectiveBudgetReportOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Use this operation to retrieve one or more service level objective (SLO) budget reports.

An error budget is the amount of time in unhealthy periods that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month.

Budget reports include a health indicator, the attainment value, and remaining budget.

For more information about SLO error budgets, see SLO concepts.

" + }, + "CreateServiceLevelObjective":{ + "name":"CreateServiceLevelObjective", + "http":{ + "method":"POST", + "requestUri":"/slo", + "responseCode":200 + }, + "input":{"shape":"CreateServiceLevelObjectiveInput"}, + "output":{"shape":"CreateServiceLevelObjectiveOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a service level objective (SLO), which can help you ensure that your critical business operations are meeting customer expectations. Use SLOs to set and track specific target levels for the reliability and availability of your applications and services. SLOs use service level indicators (SLIs) to calculate whether the application is performing at the level that you want.

Create an SLO to set a target for a service or operation’s availability or latency. CloudWatch measures this target frequently so you can find whether it has been breached.

When you create an SLO, you set an attainment goal for it. An attainment goal is the ratio of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state.

After you have created an SLO, you can retrieve error budget reports for it. An error budget is the number of periods or amount of time that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms every month translates to an error budget of 21.9 minutes of downtime per month.

When you call this operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already exist in your account. This service-linked role has the following permissions:

  • xray:GetServiceGraph

  • logs:StartQuery

  • logs:GetQueryResults

  • cloudwatch:GetMetricData

  • cloudwatch:ListMetrics

  • tag:GetResources

  • autoscaling:DescribeAutoScalingGroups

You can easily set SLO targets for your applications that are discovered by Application Signals, using critical metrics such as latency and availability. You can also set SLOs against any CloudWatch metric or math expression that produces a time series.

For more information about SLOs, see Service level objectives (SLOs).

" + }, + "DeleteServiceLevelObjective":{ + "name":"DeleteServiceLevelObjective", + "http":{ + "method":"DELETE", + "requestUri":"/slo/{Id}", + "responseCode":200 + }, + "input":{"shape":"DeleteServiceLevelObjectiveInput"}, + "output":{"shape":"DeleteServiceLevelObjectiveOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes the specified service level objective.

", + "idempotent":true + }, + "GetService":{ + "name":"GetService", + "http":{ + "method":"POST", + "requestUri":"/service", + "responseCode":200 + }, + "input":{"shape":"GetServiceInput"}, + "output":{"shape":"GetServiceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns information about a service discovered by Application Signals.

" + }, + "GetServiceLevelObjective":{ + "name":"GetServiceLevelObjective", + "http":{ + "method":"GET", + "requestUri":"/slo/{Id}", + "responseCode":200 + }, + "input":{"shape":"GetServiceLevelObjectiveInput"}, + "output":{"shape":"GetServiceLevelObjectiveOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns information about one SLO created in the account.

" + }, + "ListServiceDependencies":{ + "name":"ListServiceDependencies", + "http":{ + "method":"POST", + "requestUri":"/service-dependencies", + "responseCode":200 + }, + "input":{"shape":"ListServiceDependenciesInput"}, + "output":{"shape":"ListServiceDependenciesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of service dependencies of the service that you specify. A dependency is an infrastructure component that an operation of this service connects with. Dependencies can include Amazon Web Services services, Amazon Web Services resources, and third-party services.

" + }, + "ListServiceDependents":{ + "name":"ListServiceDependents", + "http":{ + "method":"POST", + "requestUri":"/service-dependents", + "responseCode":200 + }, + "input":{"shape":"ListServiceDependentsInput"}, + "output":{"shape":"ListServiceDependentsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the list of dependents that invoked the specified service during the provided time range. Dependents include other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors.

" + }, + "ListServiceLevelObjectives":{ + "name":"ListServiceLevelObjectives", + "http":{ + "method":"POST", + "requestUri":"/slos", + "responseCode":200 + }, + "input":{"shape":"ListServiceLevelObjectivesInput"}, + "output":{"shape":"ListServiceLevelObjectivesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of SLOs created in this account.

" + }, + "ListServiceOperations":{ + "name":"ListServiceOperations", + "http":{ + "method":"POST", + "requestUri":"/service-operations", + "responseCode":200 + }, + "input":{"shape":"ListServiceOperationsInput"}, + "output":{"shape":"ListServiceOperationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of the operations of this service that have been discovered by Application Signals. Only the operations that were invoked during the specified time range are returned.

" + }, + "ListServices":{ + "name":"ListServices", + "http":{ + "method":"GET", + "requestUri":"/services", + "responseCode":200 + }, + "input":{"shape":"ListServicesInput"}, + "output":{"shape":"ListServicesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of services that have been discovered by Application Signals. A service represents a minimum logical and transactional unit that completes a business function. Services are discovered through Application Signals instrumentation.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Displays the tags associated with a CloudWatch resource. Tags can be assigned to service level objectives.

" + }, + "StartDiscovery":{ + "name":"StartDiscovery", + "http":{ + "method":"POST", + "requestUri":"/start-discovery", + "responseCode":200 + }, + "input":{"shape":"StartDiscoveryInput"}, + "output":{"shape":"StartDiscoveryOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Enables this Amazon Web Services account to be able to use CloudWatch Application Signals by creating the AWSServiceRoleForCloudWatchApplicationSignals service-linked role. This service-linked role has the following permissions:

  • xray:GetServiceGraph

  • logs:StartQuery

  • logs:GetQueryResults

  • cloudwatch:GetMetricData

  • cloudwatch:ListMetrics

  • tag:GetResources

  • autoscaling:DescribeAutoScalingGroups

After completing this step, you still need to instrument your Java and Python applications to send data to Application Signals. For more information, see Enabling Application Signals.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tag-resource", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource, such as a service level objective.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/untag-resource", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes one or more tags from the specified resource.

" + }, + "UpdateServiceLevelObjective":{ + "name":"UpdateServiceLevelObjective", + "http":{ + "method":"PATCH", + "requestUri":"/slo/{Id}", + "responseCode":200 + }, + "input":{"shape":"UpdateServiceLevelObjectiveInput"}, + "output":{"shape":"UpdateServiceLevelObjectiveOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates an existing service level objective (SLO). If you omit parameters, the previous values of those parameters are retained.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceErrorMessage"} + }, + "documentation":"

You don't have sufficient permissions to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":255, + "min":1 + }, + "AmazonResourceName":{ + "type":"string", + "max":1024, + "min":1 + }, + "Attainment":{ + "type":"double", + "box":true + }, + "AttainmentGoal":{ + "type":"double", + "box":true + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "AttributeMaps":{ + "type":"list", + "member":{"shape":"AttributeMap"} + }, + "Attributes":{ + "type":"map", + "key":{"shape":"KeyAttributeName"}, + "value":{"shape":"KeyAttributeValue"}, + "max":3, + "min":1 + }, + "BatchGetServiceLevelObjectiveBudgetReportInput":{ + "type":"structure", + "required":[ + "Timestamp", + "SloIds" + ], + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that you want the report to be for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

" + }, + "SloIds":{ + "shape":"ServiceLevelObjectiveIds", + "documentation":"

An array containing the IDs of the service level objectives that you want to include in the report.

" + } + } + }, + "BatchGetServiceLevelObjectiveBudgetReportOutput":{ + "type":"structure", + "required":[ + "Timestamp", + "Reports", + "Errors" + ], + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that the report is for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

" + }, + "Reports":{ + "shape":"ServiceLevelObjectiveBudgetReports", + "documentation":"

An array of structures, where each structure is one budget report.

" + }, + "Errors":{ + "shape":"ServiceLevelObjectiveBudgetReportErrors", + "documentation":"

An array of structures, where each structure includes an error indicating that one of the requests in the array was not valid.

" + } + } + }, + "BudgetSecondsRemaining":{ + "type":"integer", + "box":true + }, + "CalendarInterval":{ + "type":"structure", + "required":[ + "StartTime", + "DurationUnit", + "Duration" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when you want the first interval to start. Be sure to choose a time that configures the intervals the way that you want. For example, if you want weekly intervals starting on Mondays at 6 a.m., be sure to specify a start time that is a Monday at 6 a.m.

When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

As soon as one calendar interval ends, another automatically begins.

" + }, + "DurationUnit":{ + "shape":"DurationUnit", + "documentation":"

Specifies the calendar interval unit.

" + }, + "Duration":{ + "shape":"CalendarIntervalDuration", + "documentation":"

Specifies the duration of each calendar interval. For example, if Duration is 1 and DurationUnit is MONTH, each interval is one month, aligned with the calendar.

" + } + }, + "documentation":"

If the interval for this service level objective is a calendar interval, this structure contains the interval specifications.

" + }, + "CalendarIntervalDuration":{ + "type":"integer", + "box":true, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

This operation attempted to create a resource that already exists.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateServiceLevelObjectiveInput":{ + "type":"structure", + "required":[ + "Name", + "SliConfig" + ], + "members":{ + "Name":{ + "shape":"ServiceLevelObjectiveName", + "documentation":"

A name for this SLO.

" + }, + "Description":{ + "shape":"ServiceLevelObjectiveDescription", + "documentation":"

An optional description for this SLO.

" + }, + "SliConfig":{ + "shape":"ServiceLevelIndicatorConfig", + "documentation":"

A structure that contains information about what service and what performance metric that this SLO will monitor.

" + }, + "Goal":{ + "shape":"Goal", + "documentation":"

A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs to associate with the SLO. You can associate as many as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO, you must have the cloudwatch:TagResource permission.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

" + } + } + }, + "CreateServiceLevelObjectiveOutput":{ + "type":"structure", + "required":["Slo"], + "members":{ + "Slo":{ + "shape":"ServiceLevelObjective", + "documentation":"

A structure that contains information about the SLO that you just created.

" + } + } + }, + "DeleteServiceLevelObjectiveInput":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ServiceLevelObjectiveId", + "documentation":"

The ARN or name of the service level objective to delete.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteServiceLevelObjectiveOutput":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"DimensionName", + "documentation":"

The name of the dimension. Dimension names must contain only ASCII characters, must include at least one non-whitespace character, and cannot start with a colon (:). ASCII control characters are not supported as part of dimension names.

" + }, + "Value":{ + "shape":"DimensionValue", + "documentation":"

The value of the dimension. Dimension values must contain only ASCII characters and must include at least one non-whitespace character. ASCII control characters are not supported as part of dimension values.

" + } + }, + "documentation":"

A dimension is a name/value pair that is part of the identity of a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric. For example, many Amazon EC2 metrics publish InstanceId as a dimension name, and the actual instance ID as the value for that dimension.

You can assign up to 30 dimensions to a metric.

" + }, + "DimensionName":{ + "type":"string", + "max":255, + "min":1 + }, + "DimensionValue":{ + "type":"string", + "max":1024, + "min":1 + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":30, + "min":0 + }, + "DurationUnit":{ + "type":"string", + "enum":[ + "DAY", + "MONTH" + ] + }, + "FaultDescription":{"type":"string"}, + "GetServiceInput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "KeyAttributes" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested start time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"StartTime" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested end time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"EndTime" + }, + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + } + } + }, + "GetServiceLevelObjectiveInput":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ServiceLevelObjectiveId", + "documentation":"

The ARN or name of the SLO that you want to retrieve information about. You can find the ARNs of SLOs by using the ListServiceLevelObjectives operation.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetServiceLevelObjectiveOutput":{ + "type":"structure", + "required":["Slo"], + "members":{ + "Slo":{ + "shape":"ServiceLevelObjective", + "documentation":"

A structure containing the information about the SLO.

" + } + } + }, + "GetServiceOutput":{ + "type":"structure", + "required":[ + "Service", + "StartTime", + "EndTime" + ], + "members":{ + "Service":{ + "shape":"Service", + "documentation":"

A structure containing information about the service.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the data included in the response. In a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the data included in the response. In a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "LogGroupReferences":{ + "shape":"LogGroupReferences", + "documentation":"

An array of string-to-string maps that each contain information about one log group associated with this service. Each string-to-string map includes the following fields:

  • \"Type\": \"AWS::Resource\"

  • \"ResourceType\": \"AWS::Logs::LogGroup\"

  • \"Identifier\": \"name-of-log-group\"

" + } + } + }, + "Goal":{ + "type":"structure", + "members":{ + "Interval":{ + "shape":"Interval", + "documentation":"

The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval.

If you omit this parameter, a rolling interval of 7 days is used.

" + }, + "AttainmentGoal":{ + "shape":"AttainmentGoal", + "documentation":"

The threshold that determines if the goal is being met. An attainment goal is the ratio of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state.

If you omit this parameter, 99 is used to represent 99% as the attainment goal.

" + }, + "WarningThreshold":{ + "shape":"WarningThreshold", + "documentation":"

The percentage of remaining budget over total budget that you want to get warnings for. If you omit this parameter, the default of 50.0 is used.

" + } + }, + "documentation":"

This structure contains the attributes that determine the goal of an SLO. This includes the time period for evaluation and the attainment threshold.

" + }, + "Interval":{ + "type":"structure", + "members":{ + "RollingInterval":{ + "shape":"RollingInterval", + "documentation":"

If the interval is a rolling interval, this structure contains the interval specifications.

" + }, + "CalendarInterval":{ + "shape":"CalendarInterval", + "documentation":"

If the interval is a calendar interval, this structure contains the interval specifications.

" + } + }, + "documentation":"

The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval.

", + "union":true + }, + "KeyAttributeName":{ + "type":"string", + "pattern":"[a-zA-Z]{1,50}" + }, + "KeyAttributeValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[ -~]*[!-~]+[ -~]*" + }, + "ListServiceDependenciesInput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "KeyAttributes" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested start time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"StartTime" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested end time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"EndTime" + }, + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "MaxResults":{ + "shape":"ListServiceDependenciesMaxResults", + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of service dependencies.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServiceDependenciesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListServiceDependenciesOutput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "ServiceDependencies" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "ServiceDependencies":{ + "shape":"ServiceDependencies", + "documentation":"

An array, where each object in the array contains information about one of the dependencies of this service.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value in your next use of this API to get the next set of service dependencies.

" + } + } + }, + "ListServiceDependentsInput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "KeyAttributes" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested start time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"StartTime" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested end time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"EndTime" + }, + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "MaxResults":{ + "shape":"ListServiceDependentsMaxResults", + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of service dependents.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServiceDependentsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListServiceDependentsOutput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "ServiceDependents" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "ServiceDependents":{ + "shape":"ServiceDependents", + "documentation":"

An array, where each object in the array contains information about one of the dependents of this service.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value in your next use of this API to get the next set of service dependents.

" + } + } + }, + "ListServiceLevelObjectivesInput":{ + "type":"structure", + "members":{ + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

You can use this optional field to specify which services you want to retrieve SLO information for.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "OperationName":{ + "shape":"OperationName", + "documentation":"

The name of the operation that this SLO is associated with.

", + "location":"querystring", + "locationName":"OperationName" + }, + "MaxResults":{ + "shape":"ListServiceLevelObjectivesMaxResults", + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of service level objectives.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServiceLevelObjectivesMaxResults":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "ListServiceLevelObjectivesOutput":{ + "type":"structure", + "members":{ + "SloSummaries":{ + "shape":"ServiceLevelObjectiveSummaries", + "documentation":"

An array of structures, where each structure contains information about one SLO.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value in your next use of this API to get the next set of service level objectives.

" + } + } + }, + "ListServiceOperationMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListServiceOperationsInput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "KeyAttributes" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested start time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"StartTime" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested end time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"EndTime" + }, + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "MaxResults":{ + "shape":"ListServiceOperationMaxResults", + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of service operations.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServiceOperationsOutput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "ServiceOperations" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "ServiceOperations":{ + "shape":"ServiceOperations", + "documentation":"

An array of structures that each contain information about one operation of this service.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value in your next use of this API to get the next set of service operations.

" + } + } + }, + "ListServicesInput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested start time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"StartTime" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

Your requested end time will be rounded to the nearest hour.

", + "location":"querystring", + "locationName":"EndTime" + }, + "MaxResults":{ + "shape":"ListServicesMaxResults", + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of services.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServicesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListServicesOutput":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime", + "ServiceSummaries" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057

This displays the time that Application Signals used for the request. It might not match your request exactly, because it was rounded to the nearest hour.

" + }, + "ServiceSummaries":{ + "shape":"ServiceSummaries", + "documentation":"

An array of structures, where each structure contains some information about a service. To get complete information about a service, use GetService.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Include this value in your next use of this API to get the next set of services.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to view tags for.

The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", + "location":"querystring", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The list of tag keys and values associated with the resource you specified.

" + } + } + }, + "LogGroupReferences":{ + "type":"list", + "member":{"shape":"Attributes"} + }, + "Metric":{ + "type":"structure", + "members":{ + "Namespace":{ + "shape":"Namespace", + "documentation":"

The namespace of the metric. For more information, see Namespaces.

" + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the metric to use.

" + }, + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

An array of one or more dimensions to use to define the metric that you want to use. For more information, see Dimensions.

" + } + }, + "documentation":"

This structure defines the metric used for a service level indicator, including the metric name, namespace, and dimensions

" + }, + "MetricDataQueries":{ + "type":"list", + "member":{"shape":"MetricDataQuery"} + }, + "MetricDataQuery":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"MetricId", + "documentation":"

A short name used to tie this object to the results in the response. This Id must be unique within a MetricDataQueries array. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the metric math expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.

" + }, + "MetricStat":{ + "shape":"MetricStat", + "documentation":"

A metric to be used directly for the SLO, or to be used in the math expression that will be used for the SLO.

Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" + }, + "Expression":{ + "shape":"MetricExpression", + "documentation":"

This field can contain a metric math expression to be performed on the other metrics that you are retrieving within this MetricDataQueries structure.

A math expression can use the Id of the other metrics or queries to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" + }, + "Label":{ + "shape":"MetricLabel", + "documentation":"

A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default.

You can put dynamic expressions into a label, so that it is more descriptive. For more information, see Using Dynamic Labels.

" + }, + "ReturnData":{ + "shape":"ReturnData", + "documentation":"

Use this only if you are using a metric math expression for the SLO. Specify true for ReturnData for only the one expression result to use as the alarm. For all other metrics and expressions in the same CreateServiceLevelObjective operation, specify ReturnData as false.

" + }, + "Period":{ + "shape":"Period", + "documentation":"

The granularity, in seconds, of the returned data points for this metric. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:

  • Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

  • Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

  • Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the account where this metric is located. If you are performing this operation in a monitoring account, use this to specify which source account to retrieve this metric from.

" + } + }, + "documentation":"

Use this structure to define a metric or metric math expression that you want to use as for a service level objective.

Each MetricDataQuery in the MetricDataQueries array specifies either a metric to retrieve, or a metric math expression to be performed on retrieved metrics. A single MetricDataQueries array can include as many as 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Of those Expression structures, exactly one must have true as the value for ReturnData. The result of this expression is used for the SLO.

For more information about metric math expressions, see Use metric math in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" + }, + "MetricExpression":{ + "type":"string", + "max":2048, + "min":1 + }, + "MetricId":{ + "type":"string", + "max":255, + "min":1 + }, + "MetricLabel":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":255, + "min":1 + }, + "MetricReference":{ + "type":"structure", + "required":[ + "Namespace", + "MetricType", + "MetricName" + ], + "members":{ + "Namespace":{ + "shape":"Namespace", + "documentation":"

The namespace of the metric. For more information, see Namespaces in the Amazon CloudWatch User Guide.

" + }, + "MetricType":{ + "shape":"MetricType", + "documentation":"

Used to display the appropriate statistics in the CloudWatch console.

" + }, + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

An array of one or more dimensions that further define the metric. For more information, see Dimensions in the Amazon CloudWatch User Guide.

" + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the metric.

" + } + }, + "documentation":"

This structure contains information about one CloudWatch metric associated with this entity discovered by Application Signals.

" + }, + "MetricReferences":{ + "type":"list", + "member":{"shape":"MetricReference"} + }, + "MetricStat":{ + "type":"structure", + "required":[ + "Metric", + "Period", + "Stat" + ], + "members":{ + "Metric":{ + "shape":"Metric", + "documentation":"

The metric to use as the service level indicator, including the metric name, namespace, and dimensions.

" + }, + "Period":{ + "shape":"Period", + "documentation":"

The granularity, in seconds, to be used for the metric. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

" + }, + "Stat":{ + "shape":"Stat", + "documentation":"

The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, see CloudWatch statistics definitions.

" + }, + "Unit":{ + "shape":"StandardUnit", + "documentation":"

If you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

" + } + }, + "documentation":"

This structure defines the metric to be used as the service level indicator, along with the statistics, period, and unit.

" + }, + "MetricType":{ + "type":"string", + "pattern":"[A-Za-z0-9 -]+" + }, + "Namespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*[^:].*" + }, + "NextToken":{"type":"string"}, + "OperationName":{ + "type":"string", + "max":255, + "min":1 + }, + "Period":{ + "type":"integer", + "box":true, + "min":1 + }, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId", + "Message" + ], + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type is not valid.

" + }, + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

Can't find the resource id.

" + }, + "Message":{"shape":"FaultDescription"} + }, + "documentation":"

Resource not found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{"type":"string"}, + "ReturnData":{ + "type":"boolean", + "box":true + }, + "RollingInterval":{ + "type":"structure", + "required":[ + "DurationUnit", + "Duration" + ], + "members":{ + "DurationUnit":{ + "shape":"DurationUnit", + "documentation":"

Specifies the rolling interval unit.

" + }, + "Duration":{ + "shape":"RollingIntervalDuration", + "documentation":"

Specifies the duration of each rolling interval. For example, if Duration is 7 and DurationUnit is DAY, each rolling interval is seven days.

" + } + }, + "documentation":"

If the interval for this SLO is a rolling interval, this structure contains the interval specifications.

" + }, + "RollingIntervalDuration":{ + "type":"integer", + "box":true, + "min":1 + }, + "SLIPeriodSeconds":{ + "type":"integer", + "box":true, + "max":900, + "min":60 + }, + "Service":{ + "type":"structure", + "required":[ + "KeyAttributes", + "MetricReferences" + ], + "members":{ + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "AttributeMaps":{ + "shape":"AttributeMaps", + "documentation":"

This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes.

Platform attributes contain information about the service's platform.

  • PlatformType defines the hosted-in platform.

  • EKS.Cluster is the name of the Amazon EKS cluster.

  • K8s.Cluster is the name of the self-hosted Kubernetes cluster.

  • K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters.

  • K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters.

  • K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters.

  • K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters.

  • EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group.

  • EC2.InstanceId is the ID of the Amazon EC2 instance.

  • Host is the name of the host, for all platform types.

Application attributes contain information about the application.

  • AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry.

  • AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry.

Telemetry attributes contain telemetry information.

  • Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services.

  • Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data.

  • Telemetry.Source Specifies the point of application where the telemetry was collected or specifies what was used for the source of telemetry data.

" + }, + "MetricReferences":{ + "shape":"MetricReferences", + "documentation":"

An array of structures that each contain information about one metric associated with this service.

" + }, + "LogGroupReferences":{ + "shape":"LogGroupReferences", + "documentation":"

An array of string-to-string maps that each contain information about one log group associated with this service. Each string-to-string map includes the following fields:

  • \"Type\": \"AWS::Resource\"

  • \"ResourceType\": \"AWS::Logs::LogGroup\"

  • \"Identifier\": \"name-of-log-group\"

" + } + }, + "documentation":"

This structure contains information about one of your services that was discovered by Application Signals.

" + }, + "ServiceDependencies":{ + "type":"list", + "member":{"shape":"ServiceDependency"}, + "max":100, + "min":0 + }, + "ServiceDependency":{ + "type":"structure", + "required":[ + "OperationName", + "DependencyKeyAttributes", + "DependencyOperationName", + "MetricReferences" + ], + "members":{ + "OperationName":{ + "shape":"OperationName", + "documentation":"

The name of the operation in this service that calls the dependency.

" + }, + "DependencyKeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "DependencyOperationName":{ + "shape":"OperationName", + "documentation":"

The name of the called operation in the dependency.

" + }, + "MetricReferences":{ + "shape":"MetricReferences", + "documentation":"

An array of structures that each contain information about one metric associated with this service dependency that was discovered by Application Signals.

" + } + }, + "documentation":"

This structure contains information about one dependency of this service.

" + }, + "ServiceDependent":{ + "type":"structure", + "required":[ + "DependentKeyAttributes", + "MetricReferences" + ], + "members":{ + "OperationName":{ + "shape":"OperationName", + "documentation":"

If the invoked entity is an operation on an entity, the name of that dependent operation is displayed here.

" + }, + "DependentKeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "DependentOperationName":{ + "shape":"OperationName", + "documentation":"

If the dependent invoker was a service that invoked it from an operation, the name of that dependent operation is displayed here.

" + }, + "MetricReferences":{ + "shape":"MetricReferences", + "documentation":"

An array of structures that each contain information about one metric associated with this service dependent that was discovered by Application Signals.

" + } + }, + "documentation":"

This structure contains information about a service dependent that was discovered by Application Signals. A dependent is an entity that invoked the specified service during the provided time range. Dependents include other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors.

" + }, + "ServiceDependents":{ + "type":"list", + "member":{"shape":"ServiceDependent"}, + "max":100, + "min":0 + }, + "ServiceErrorMessage":{"type":"string"}, + "ServiceLevelIndicator":{ + "type":"structure", + "required":[ + "SliMetric", + "MetricThreshold", + "ComparisonOperator" + ], + "members":{ + "SliMetric":{ + "shape":"ServiceLevelIndicatorMetric", + "documentation":"

A structure that contains information about the metric that the SLO monitors.

" + }, + "MetricThreshold":{ + "shape":"ServiceLevelIndicatorMetricThreshold", + "documentation":"

The value that the SLI metric is compared to.

" + }, + "ComparisonOperator":{ + "shape":"ServiceLevelIndicatorComparisonOperator", + "documentation":"

The arithmetic operation used when comparing the specified metric to the threshold.

" + } + }, + "documentation":"

This structure contains information about the performance metric that an SLO monitors.

" + }, + "ServiceLevelIndicatorComparisonOperator":{ + "type":"string", + "enum":[ + "GreaterThanOrEqualTo", + "GreaterThan", + "LessThan", + "LessThanOrEqualTo" + ] + }, + "ServiceLevelIndicatorConfig":{ + "type":"structure", + "required":[ + "SliMetricConfig", + "MetricThreshold", + "ComparisonOperator" + ], + "members":{ + "SliMetricConfig":{ + "shape":"ServiceLevelIndicatorMetricConfig", + "documentation":"

Use this structure to specify the metric to be used for the SLO.

" + }, + "MetricThreshold":{ + "shape":"ServiceLevelIndicatorMetricThreshold", + "documentation":"

The value that the SLI metric is compared to.

" + }, + "ComparisonOperator":{ + "shape":"ServiceLevelIndicatorComparisonOperator", + "documentation":"

The arithmetic operation to use when comparing the specified metric to the threshold.

" + } + }, + "documentation":"

This structure specifies the information about the service and the performance metric that an SLO is to monitor.

" + }, + "ServiceLevelIndicatorMetric":{ + "type":"structure", + "required":["MetricDataQueries"], + "members":{ + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map that contains information about the type of object that this SLO is related to. It can include the following fields.

  • Type designates the type of object that this SLO is related to.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "OperationName":{ + "shape":"OperationName", + "documentation":"

If the SLO monitors a specific operation of the service, this field displays that operation name.

" + }, + "MetricType":{ + "shape":"ServiceLevelIndicatorMetricType", + "documentation":"

If the SLO monitors either the LATENCY or AVAILABILITY metric that Application Signals collects, this field displays which of those metrics is used.

" + }, + "MetricDataQueries":{ + "shape":"MetricDataQueries", + "documentation":"

If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, this structure includes the information about that metric or expression.

" + } + }, + "documentation":"

This structure contains the information about the metric that is used for the SLO.

" + }, + "ServiceLevelIndicatorMetricConfig":{ + "type":"structure", + "members":{ + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

If this SLO is related to a metric collected by Application Signals, you must use this field to specify which service the SLO metric is related to. To do so, you must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "OperationName":{ + "shape":"OperationName", + "documentation":"

If the SLO is to monitor a specific operation of the service, use this field to specify the name of that operation.

" + }, + "MetricType":{ + "shape":"ServiceLevelIndicatorMetricType", + "documentation":"

If the SLO is to monitor either the LATENCY or AVAILABILITY metric that Application Signals collects, use this field to specify which of those metrics is used.

" + }, + "Statistic":{ + "shape":"ServiceLevelIndicatorStatistic", + "documentation":"

The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, see CloudWatch statistics definitions.

" + }, + "PeriodSeconds":{ + "shape":"SLIPeriodSeconds", + "documentation":"

The number of seconds to use as the period for SLO evaluation. Your application's performance is compared to the SLI during each period. For each period, the application is determined to have either achieved or not achieved the necessary performance.

" + }, + "MetricDataQueries":{ + "shape":"MetricDataQueries", + "documentation":"

If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, use this structure to specify that metric or expression.

" + } + }, + "documentation":"

Use this structure to specify the information for the metric that the SLO will monitor.

" + }, + "ServiceLevelIndicatorMetricThreshold":{ + "type":"double", + "box":true + }, + "ServiceLevelIndicatorMetricType":{ + "type":"string", + "enum":[ + "LATENCY", + "AVAILABILITY" + ] + }, + "ServiceLevelIndicatorStatistic":{ + "type":"string", + "max":20, + "min":1, + "pattern":"[a-zA-Z0-9.]+" + }, + "ServiceLevelObjective":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "CreatedTime", + "LastUpdatedTime", + "Sli", + "Goal" + ], + "members":{ + "Arn":{ + "shape":"ServiceLevelObjectiveArn", + "documentation":"

The ARN of this SLO.

" + }, + "Name":{ + "shape":"ServiceLevelObjectiveName", + "documentation":"

The name of this SLO.

" + }, + "Description":{ + "shape":"ServiceLevelObjectiveDescription", + "documentation":"

The description that you created for this SLO.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that this SLO was created. When used in a raw HTTP Query API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59.

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The time that this SLO was most recently updated. When used in a raw HTTP Query API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59.

" + }, + "Sli":{ + "shape":"ServiceLevelIndicator", + "documentation":"

A structure containing information about the performance metric that this SLO monitors.

" + }, + "Goal":{"shape":"Goal"} + }, + "documentation":"

A structure containing information about one service level objective (SLO) that has been created in Application Signals. Creating SLOs can help you ensure your services are performing to the level that you expect. SLOs help you set and track a specific target level for the reliability and availability of your applications and services. Each SLO uses a service level indicator (SLI), which is a key performance metric, to calculate how much underperformance can be tolerated before the goal that you set for the SLO is not achieved.

" + }, + "ServiceLevelObjectiveArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]" + }, + "ServiceLevelObjectiveBudgetReport":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "BudgetStatus" + ], + "members":{ + "Arn":{ + "shape":"ServiceLevelObjectiveArn", + "documentation":"

The ARN of the SLO that this report is for.

" + }, + "Name":{ + "shape":"ServiceLevelObjectiveName", + "documentation":"

The name of the SLO that this report is for.

" + }, + "BudgetStatus":{ + "shape":"ServiceLevelObjectiveBudgetStatus", + "documentation":"

The status of this SLO, as it relates to the error budget for the entire time interval.

  • OK means that the SLO had remaining budget above the warning threshold, as of the time that you specified in TimeStamp.

  • WARNING means that the SLO's remaining budget was below the warning threshold, as of the time that you specified in TimeStamp.

  • BREACHED means that the SLO's budget was exhausted, as of the time that you specified in TimeStamp.

  • INSUFFICIENT_DATA means that the specified start and end times were before the SLO was created, or that attainment data is missing.

" + }, + "Attainment":{ + "shape":"Attainment", + "documentation":"

A number between 0 and 100 that represents the percentage of time periods that the service has attained the SLO's attainment goal, as of the time of the request.

" + }, + "TotalBudgetSeconds":{ + "shape":"TotalBudgetSeconds", + "documentation":"

The total number of seconds in the error budget for the interval.

" + }, + "BudgetSecondsRemaining":{ + "shape":"BudgetSecondsRemaining", + "documentation":"

The budget amount remaining before the SLO status becomes BREACHED, at the time specified in the Timestamp parameter of the request. If this value is negative, then the SLO is already in BREACHED status.

" + }, + "Sli":{ + "shape":"ServiceLevelIndicator", + "documentation":"

A structure that contains information about the performance metric that this SLO monitors.

" + }, + "Goal":{"shape":"Goal"} + }, + "documentation":"

A structure containing an SLO budget report that you have requested.

" + }, + "ServiceLevelObjectiveBudgetReportError":{ + "type":"structure", + "required":[ + "Name", + "Arn", + "ErrorCode", + "ErrorMessage" + ], + "members":{ + "Name":{ + "shape":"ServiceLevelObjectiveName", + "documentation":"

The name of the SLO that this error is related to.

" + }, + "Arn":{ + "shape":"ServiceLevelObjectiveArn", + "documentation":"

The ARN of the SLO that this error is related to.

" + }, + "ErrorCode":{ + "shape":"ServiceLevelObjectiveBudgetReportErrorCode", + "documentation":"

The error code for this error.

" + }, + "ErrorMessage":{ + "shape":"ServiceLevelObjectiveBudgetReportErrorMessage", + "documentation":"

The message for this error.

" + } + }, + "documentation":"

A structure containing information about one error that occurred during a BatchGetServiceLevelObjectiveBudgetReport operation.

" + }, + "ServiceLevelObjectiveBudgetReportErrorCode":{"type":"string"}, + "ServiceLevelObjectiveBudgetReportErrorMessage":{"type":"string"}, + "ServiceLevelObjectiveBudgetReportErrors":{ + "type":"list", + "member":{"shape":"ServiceLevelObjectiveBudgetReportError"}, + "max":50, + "min":0 + }, + "ServiceLevelObjectiveBudgetReports":{ + "type":"list", + "member":{"shape":"ServiceLevelObjectiveBudgetReport"}, + "max":50, + "min":0 + }, + "ServiceLevelObjectiveBudgetStatus":{ + "type":"string", + "enum":[ + "OK", + "WARNING", + "BREACHED", + "INSUFFICIENT_DATA" + ] + }, + "ServiceLevelObjectiveDescription":{ + "type":"string", + "max":1024, + "min":1 + }, + "ServiceLevelObjectiveId":{ + "type":"string", + "pattern":"[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$|^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]" + }, + "ServiceLevelObjectiveIds":{ + "type":"list", + "member":{"shape":"String"}, + "max":50, + "min":1 + }, + "ServiceLevelObjectiveName":{ + "type":"string", + "pattern":"[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]" + }, + "ServiceLevelObjectiveSummaries":{ + "type":"list", + "member":{"shape":"ServiceLevelObjectiveSummary"} + }, + "ServiceLevelObjectiveSummary":{ + "type":"structure", + "required":[ + "Arn", + "Name" + ], + "members":{ + "Arn":{ + "shape":"ServiceLevelObjectiveArn", + "documentation":"

The ARN of this service level objective.

" + }, + "Name":{ + "shape":"ServiceLevelObjectiveName", + "documentation":"

The name of the service level objective.

" + }, + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this service level objective is for.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "OperationName":{ + "shape":"OperationName", + "documentation":"

If this service level objective is specific to a single operation, this field displays the name of that operation.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that this service level objective was created. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

" + } + }, + "documentation":"

A structure that contains information about one service level objective (SLO) created in Application Signals.

" + }, + "ServiceOperation":{ + "type":"structure", + "required":[ + "Name", + "MetricReferences" + ], + "members":{ + "Name":{ + "shape":"OperationName", + "documentation":"

The name of the operation, discovered by Application Signals.

" + }, + "MetricReferences":{ + "shape":"MetricReferences", + "documentation":"

An array of structures that each contain information about one metric associated with this service operation that was discovered by Application Signals.

" + } + }, + "documentation":"

This structure contains information about an operation discovered by Application Signals. An operation is a specific function performed by a service that was discovered by Application Signals, and is often an API that is called by an upstream dependent.

" + }, + "ServiceOperations":{ + "type":"list", + "member":{"shape":"ServiceOperation"}, + "max":100, + "min":0 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

This request exceeds a service quota.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "ServiceSummaries":{ + "type":"list", + "member":{"shape":"ServiceSummary"} + }, + "ServiceSummary":{ + "type":"structure", + "required":[ + "KeyAttributes", + "MetricReferences" + ], + "members":{ + "KeyAttributes":{ + "shape":"Attributes", + "documentation":"

This is a string-to-string map that help identify the objects discovered by Application Signals. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.

" + }, + "AttributeMaps":{ + "shape":"AttributeMaps", + "documentation":"

This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes.

Platform attributes contain information about the service's platform.

  • PlatformType defines the hosted-in platform.

  • EKS.Cluster is the name of the Amazon EKS cluster.

  • K8s.Cluster is the name of the self-hosted Kubernetes cluster.

  • K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters.

  • K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters.

  • K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters.

  • K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters.

  • EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group.

  • EC2.InstanceId is the ID of the Amazon EC2 instance.

  • Host is the name of the host, for all platform types.

Application attributes contain information about the application.

  • AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry.

  • AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry.

Telemetry attributes contain telemetry information.

  • Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services.

  • Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data.

  • Telemetry.Source specifies the point of application where the telemetry was collected or specifies what was used for the source of telemetry data.

" + }, + "MetricReferences":{ + "shape":"MetricReferences", + "documentation":"

An array of structures that each contain information about one metric associated with this service.

" + } + }, + "documentation":"

This structure contains information about one of your services that was discovered by Application Signals.

" + }, + "StandardUnit":{ + "type":"string", + "enum":[ + "Microseconds", + "Milliseconds", + "Seconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "StartDiscoveryInput":{ + "type":"structure", + "members":{ + } + }, + "StartDiscoveryOutput":{ + "type":"structure", + "members":{ + } + }, + "Stat":{"type":"string"}, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value for the specified tag key.

" + } + }, + "documentation":"

A key-value pair associated with a resource. Tags can help you organize and categorize your resources.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to set tags for.

The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The list of key-value pairs to associate with the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request was throttled because of quota limits.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TotalBudgetSeconds":{ + "type":"integer", + "box":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to delete tags from.

The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateServiceLevelObjectiveInput":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ServiceLevelObjectiveId", + "documentation":"

The Amazon Resource Name (ARN) or name of the service level objective that you want to update.

", + "location":"uri", + "locationName":"Id" + }, + "Description":{ + "shape":"ServiceLevelObjectiveDescription", + "documentation":"

An optional description for the SLO.

" + }, + "SliConfig":{ + "shape":"ServiceLevelIndicatorConfig", + "documentation":"

A structure that contains information about what performance metric this SLO will monitor.

" + }, + "Goal":{ + "shape":"Goal", + "documentation":"

A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold.

" + } + } + }, + "UpdateServiceLevelObjectiveOutput":{ + "type":"structure", + "required":["Slo"], + "members":{ + "Slo":{ + "shape":"ServiceLevelObjective", + "documentation":"

A structure that contains information about the SLO that you just updated.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ValidationExceptionMessage"} + }, + "documentation":"

The resource is not valid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionMessage":{"type":"string"}, + "WarningThreshold":{ + "type":"double", + "box":true + } + }, + "documentation":"

Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience.

Application Signals provides the following benefits:

  • Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors.

  • Create and monitor service level objectives (SLOs).

  • See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity.

Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, and application names within dashboards and maps.

" +} diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index befae56f04..7e7b9238fc 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"appstream2", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon AppStream", "serviceId":"AppStream", "signatureVersion":"v4", "signingName":"appstream", "targetPrefix":"PhotonAdminProxyService", - "uid":"appstream-2016-12-01" + "uid":"appstream-2016-12-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAppBlockBuilderAppBlock":{ @@ -327,6 +329,24 @@ ], "documentation":"

Creates a temporary URL to start an AppStream 2.0 streaming session for the specified user. A streaming URL enables application streaming to be tested without user setup.

" }, + "CreateThemeForStack":{ + "name":"CreateThemeForStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateThemeForStackRequest"}, + "output":{"shape":"CreateThemeForStackResult"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidAccountStatusException"}, + {"shape":"OperationNotPermittedException"} + ], + "documentation":"

Creates custom branding that customizes the appearance of the streaming application catalog page.

" + }, "CreateUpdatedImage":{ "name":"CreateUpdatedImage", "http":{ @@ -532,6 +552,21 @@ ], "documentation":"

Deletes the specified stack. After the stack is deleted, the application streaming environment provided by the stack is no longer available to users. Also, any reservations made for application streaming sessions for the stack are released.

" }, + "DeleteThemeForStack":{ + "name":"DeleteThemeForStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteThemeForStackRequest"}, + "output":{"shape":"DeleteThemeForStackResult"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotPermittedException"} + ], + "documentation":"

Deletes custom branding that customizes the appearance of the streaming application catalog page.

" + }, "DeleteUsageReportSubscription":{ "name":"DeleteUsageReportSubscription", "http":{ @@ -736,6 +771,20 @@ ], "documentation":"

Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described.

" }, + "DescribeThemeForStack":{ + "name":"DescribeThemeForStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeThemeForStackRequest"}, + "output":{"shape":"DescribeThemeForStackResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotPermittedException"} + ], + "documentation":"

Retrieves a list that describes the theme for a specified stack. A theme is custom branding that customizes the appearance of the streaming application catalog page.

" + }, "DescribeUsageReportSubscriptions":{ "name":"DescribeUsageReportSubscriptions", "http":{ @@ -1183,6 +1232,24 @@ {"shape":"ConcurrentModificationException"} ], "documentation":"

Updates the specified fields for the specified stack.

" + }, + "UpdateThemeForStack":{ + "name":"UpdateThemeForStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateThemeForStackRequest"}, + "output":{"shape":"UpdateThemeForStackResult"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidAccountStatusException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"OperationNotPermittedException"} + ], + "documentation":"

Updates custom branding that customizes the appearance of the streaming application catalog page.

" } }, "shapes":{ @@ -2169,7 +2236,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

  • stream.standard.small

  • stream.standard.medium

  • stream.standard.large

  • stream.standard.xlarge

  • stream.standard.2xlarge

  • stream.compute.large

  • stream.compute.xlarge

  • stream.compute.2xlarge

  • stream.compute.4xlarge

  • stream.compute.8xlarge

  • stream.memory.large

  • stream.memory.xlarge

  • stream.memory.2xlarge

  • stream.memory.4xlarge

  • stream.memory.8xlarge

  • stream.memory.z1d.large

  • stream.memory.z1d.xlarge

  • stream.memory.z1d.2xlarge

  • stream.memory.z1d.3xlarge

  • stream.memory.z1d.6xlarge

  • stream.memory.z1d.12xlarge

  • stream.graphics-design.large

  • stream.graphics-design.xlarge

  • stream.graphics-design.2xlarge

  • stream.graphics-design.4xlarge

  • stream.graphics-desktop.2xlarge

  • stream.graphics.g4dn.xlarge

  • stream.graphics.g4dn.2xlarge

  • stream.graphics.g4dn.4xlarge

  • stream.graphics.g4dn.8xlarge

  • stream.graphics.g4dn.12xlarge

  • stream.graphics.g4dn.16xlarge

  • stream.graphics-pro.4xlarge

  • stream.graphics-pro.8xlarge

  • stream.graphics-pro.16xlarge

The following instance types are available for Elastic fleets:

  • stream.standard.small

  • stream.standard.medium

  • stream.standard.large

  • stream.standard.xlarge

  • stream.standard.2xlarge

" + "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

  • stream.standard.small

  • stream.standard.medium

  • stream.standard.large

  • stream.standard.xlarge

  • stream.standard.2xlarge

  • stream.compute.large

  • stream.compute.xlarge

  • stream.compute.2xlarge

  • stream.compute.4xlarge

  • stream.compute.8xlarge

  • stream.memory.large

  • stream.memory.xlarge

  • stream.memory.2xlarge

  • stream.memory.4xlarge

  • stream.memory.8xlarge

  • stream.memory.z1d.large

  • stream.memory.z1d.xlarge

  • stream.memory.z1d.2xlarge

  • stream.memory.z1d.3xlarge

  • stream.memory.z1d.6xlarge

  • stream.memory.z1d.12xlarge

  • stream.graphics-design.large

  • stream.graphics-design.xlarge

  • stream.graphics-design.2xlarge

  • stream.graphics-design.4xlarge

  • stream.graphics-desktop.2xlarge

  • stream.graphics.g4dn.xlarge

  • stream.graphics.g4dn.2xlarge

  • stream.graphics.g4dn.4xlarge

  • stream.graphics.g4dn.8xlarge

  • stream.graphics.g4dn.12xlarge

  • stream.graphics.g4dn.16xlarge

  • stream.graphics.g5.xlarge

  • stream.graphics.g5.2xlarge

  • stream.graphics.g5.4xlarge

  • stream.graphics.g5.8xlarge

  • stream.graphics.g5.12xlarge

  • stream.graphics.g5.16xlarge

  • stream.graphics.g5.24xlarge

  • stream.graphics-pro.4xlarge

  • stream.graphics-pro.8xlarge

  • stream.graphics-pro.16xlarge

The following instance types are available for Elastic fleets:

  • stream.standard.small

  • stream.standard.medium

  • stream.standard.large

  • stream.standard.xlarge

  • stream.standard.2xlarge

" }, "FleetType":{ "shape":"FleetType", @@ -2189,7 +2256,7 @@ }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

" + "documentation":"

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 36000.

" }, "Description":{ "shape":"Description", @@ -2213,7 +2280,7 @@ }, "IdleDisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" + "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" }, "IamRoleArn":{ "shape":"Arn", @@ -2461,6 +2528,51 @@ } } }, + "CreateThemeForStackRequest":{ + "type":"structure", + "required":[ + "StackName", + "TitleText", + "ThemeStyling", + "OrganizationLogoS3Location", + "FaviconS3Location" + ], + "members":{ + "StackName":{ + "shape":"Name", + "documentation":"

The name of the stack for the theme.

" + }, + "FooterLinks":{ + "shape":"ThemeFooterLinks", + "documentation":"

The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.

" + }, + "TitleText":{ + "shape":"ThemeTitleText", + "documentation":"

The title that is displayed at the top of the browser tab during users' application streaming sessions.

" + }, + "ThemeStyling":{ + "shape":"ThemeStyling", + "documentation":"

The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.

" + }, + "OrganizationLogoS3Location":{ + "shape":"S3Location", + "documentation":"

The organization logo that appears on the streaming application catalog page.

" + }, + "FaviconS3Location":{ + "shape":"S3Location", + "documentation":"

The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.

" + } + } + }, + "CreateThemeForStackResult":{ + "type":"structure", + "members":{ + "Theme":{ + "shape":"Theme", + "documentation":"

The theme object that contains the metadata of the custom branding.

" + } + } + }, "CreateUpdatedImageRequest":{ "type":"structure", "required":[ @@ -2728,6 +2840,21 @@ "members":{ } }, + "DeleteThemeForStackRequest":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{ + "shape":"Name", + "documentation":"

The name of the stack for the theme.

" + } + } + }, + "DeleteThemeForStackResult":{ + "type":"structure", + "members":{ + } + }, "DeleteUsageReportSubscriptionRequest":{ "type":"structure", "members":{ @@ -3198,6 +3325,25 @@ } } }, + "DescribeThemeForStackRequest":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{ + "shape":"Name", + "documentation":"

The name of the stack for the theme.

" + } + } + }, + "DescribeThemeForStackResult":{ + "type":"structure", + "members":{ + "Theme":{ + "shape":"Theme", + "documentation":"

The theme object that contains the metadata of the custom branding.

" + } + } + }, "DescribeUsageReportSubscriptionsRequest":{ "type":"structure", "members":{ @@ -3477,6 +3623,13 @@ "member":{"shape":"Domain"}, "max":50 }, + "DynamicAppProvidersEnabled":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "EmbedHostDomain":{ "type":"string", "documentation":"Specifies a valid domain that can embed AppStream. Valid examples include: [\"testorigin.tt--com\", \"testingorigin.com.us\", \"test.com.us\"] Invalid examples include: [\"test,com\", \".com\", \"h*llo.com\". \"\"]", @@ -3702,7 +3855,7 @@ }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

" + "documentation":"

The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 36000.

" }, "State":{ "shape":"FleetState", @@ -3730,7 +3883,7 @@ }, "IdleDisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" + "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" }, "IamRoleArn":{ "shape":"Arn", @@ -3927,6 +4080,22 @@ "ImageErrors":{ "shape":"ResourceErrors", "documentation":"

Describes the errors that are returned when a new image can't be created.

" + }, + "LatestAppstreamAgentVersion":{ + "shape":"LatestAppstreamAgentVersion", + "documentation":"

Indicates whether the image is using the latest AppStream 2.0 agent version or not.

" + }, + "SupportedInstanceFamilies":{ + "shape":"StringList", + "documentation":"

The supported instance families that determine which image a customer can use when the customer launches a fleet or image builder. The following instance families are supported:

  • General Purpose

  • Compute Optimized

  • Memory Optimized

  • Graphics

  • Graphics Design

  • Graphics Pro

  • Graphics G4

  • Graphics G5

" + }, + "DynamicAppProvidersEnabled":{ + "shape":"DynamicAppProvidersEnabled", + "documentation":"

Indicates whether dynamic app providers are enabled within an AppStream 2.0 image or not.

" + }, + "ImageSharedWithOthers":{ + "shape":"ImageSharedWithOthers", + "documentation":"

Indicates whether the image is shared with another account ID.

" } }, "documentation":"

Describes an image.

" @@ -4003,6 +4172,10 @@ "AccessEndpoints":{ "shape":"AccessEndpointList", "documentation":"

The list of virtual private cloud (VPC) interface endpoint objects. Administrators can connect to the image builder only through the specified endpoints.

" + }, + "LatestAppstreamAgentVersion":{ + "shape":"LatestAppstreamAgentVersion", + "documentation":"

Indicates whether the image builder is using the latest AppStream 2.0 agent version or not.

" } }, "documentation":"

Describes a virtual machine that is used to create an image.

" @@ -4066,6 +4239,13 @@ }, "documentation":"

Describes the permissions for an image.

" }, + "ImageSharedWithOthers":{ + "type":"string", + "enum":[ + "TRUE", + "FALSE" + ] + }, "ImageState":{ "type":"string", "enum":[ @@ -4151,6 +4331,13 @@ "type":"list", "member":{"shape":"LastReportGenerationExecutionError"} }, + "LatestAppstreamAgentVersion":{ + "type":"string", + "enum":[ + "TRUE", + "FALSE" + ] + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -4344,7 +4531,8 @@ "WINDOWS_SERVER_2016", "WINDOWS_SERVER_2019", "WINDOWS_SERVER_2022", - "AMAZON_LINUX2" + "AMAZON_LINUX2", + "RHEL8" ] }, "Platforms":{ @@ -4952,6 +5140,103 @@ "max":50, "min":1 }, + "Theme":{ + "type":"structure", + "members":{ + "StackName":{ + "shape":"Name", + "documentation":"

The stack that has the custom branding theme.

" + }, + "State":{ + "shape":"ThemeState", + "documentation":"

The state of the theme.

" + }, + "ThemeTitleText":{ + "shape":"ThemeTitleText", + "documentation":"

The browser tab page title.

" + }, + "ThemeStyling":{ + "shape":"ThemeStyling", + "documentation":"

The color that is used for the website links, text, buttons, and catalog page background.

" + }, + "ThemeFooterLinks":{ + "shape":"ThemeFooterLinks", + "documentation":"

The website links that display in the catalog page footer.

" + }, + "ThemeOrganizationLogoURL":{ + "shape":"String", + "documentation":"

The URL of the logo that displays in the catalog page header.

" + }, + "ThemeFaviconURL":{ + "shape":"String", + "documentation":"

The URL of the icon that displays at the top of a user's browser tab during streaming sessions.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The time the theme was created.

" + } + }, + "documentation":"

The custom branding theme, which might include a custom logo, website links, and other branding to display to users.

" + }, + "ThemeAttribute":{ + "type":"string", + "enum":["FOOTER_LINKS"] + }, + "ThemeAttributes":{ + "type":"list", + "member":{"shape":"ThemeAttribute"} + }, + "ThemeFooterLink":{ + "type":"structure", + "members":{ + "DisplayName":{ + "shape":"ThemeFooterLinkDisplayName", + "documentation":"

The names of the websites that display in the catalog page footer.

" + }, + "FooterLinkURL":{ + "shape":"ThemeFooterLinkURL", + "documentation":"

The URLs of the websites that display in the catalog page footer.

" + } + }, + "documentation":"

The website links that display in the catalog page footer.

" + }, + "ThemeFooterLinkDisplayName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[-@./#&+\\w\\s]*$" + }, + "ThemeFooterLinkURL":{ + "type":"string", + "max":1000, + "min":1 + }, + "ThemeFooterLinks":{ + "type":"list", + "member":{"shape":"ThemeFooterLink"} + }, + "ThemeState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ThemeStyling":{ + "type":"string", + "enum":[ + "LIGHT_BLUE", + "BLUE", + "PINK", + "RED" + ] + }, + "ThemeTitleText":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[-@./#&+\\w\\s]*$" + }, "Timestamp":{"type":"timestamp"}, "UntagResourceRequest":{ "type":"structure", @@ -5177,7 +5462,7 @@ }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

" + "documentation":"

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 36000.

" }, "DeleteVpcConfig":{ "shape":"Boolean", @@ -5202,7 +5487,7 @@ }, "IdleDisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" + "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" }, "AttributesToDelete":{ "shape":"FleetAttributes", @@ -5342,6 +5627,53 @@ } } }, + "UpdateThemeForStackRequest":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{ + "shape":"Name", + "documentation":"

The name of the stack for the theme.

" + }, + "FooterLinks":{ + "shape":"ThemeFooterLinks", + "documentation":"

The links that are displayed in the footer of the streaming application catalog page. These links are helpful resources for users, such as the organization's IT support and product marketing sites.

" + }, + "TitleText":{ + "shape":"ThemeTitleText", + "documentation":"

The title that is displayed at the top of the browser tab during users' application streaming sessions.

" + }, + "ThemeStyling":{ + "shape":"ThemeStyling", + "documentation":"

The color theme that is applied to website links, text, and buttons. These colors are also applied as accents in the background for the streaming application catalog page.

" + }, + "OrganizationLogoS3Location":{ + "shape":"S3Location", + "documentation":"

The organization logo that appears on the streaming application catalog page.

" + }, + "FaviconS3Location":{ + "shape":"S3Location", + "documentation":"

The S3 location of the favicon. The favicon enables users to recognize their application streaming site in a browser full of tabs or bookmarks. It is displayed at the top of the browser tab for the application streaming site during users' streaming sessions.

" + }, + "State":{ + "shape":"ThemeState", + "documentation":"

Specifies whether custom branding should be applied to the catalog page or not.

" + }, + "AttributesToDelete":{ + "shape":"ThemeAttributes", + "documentation":"

The attributes to delete.

" + } + } + }, + "UpdateThemeForStackResult":{ + "type":"structure", + "members":{ + "Theme":{ + "shape":"Theme", + "documentation":"

The theme object that contains the metadata of the custom branding.

" + } + } + }, "UsageReportExecutionErrorCode":{ "type":"string", "enum":[ @@ -5461,7 +5793,7 @@ }, "MaximumLength":{ "shape":"Integer", - "documentation":"

Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.

This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions.

This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED.

This can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets.

The value can be between 1 and 20,971,520 (20 MB).

" + "documentation":"

Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.

This can be specified only for the CLIPBOARD_COPY_FROM_LOCAL_DEVICE and CLIPBOARD_COPY_TO_LOCAL_DEVICE actions.

This defaults to 20,971,520 (20 MB) when unspecified and the permission is ENABLED. This can't be specified when the permission is DISABLED.

The value can be between 1 and 20,971,520 (20 MB).

" } }, "documentation":"

Describes an action and whether the action is enabled or disabled for users during their streaming sessions.

" diff --git a/botocore/data/appsync/2017-07-25/paginators-1.json b/botocore/data/appsync/2017-07-25/paginators-1.json index 487d71e6b8..0da53b21f4 100644 --- a/botocore/data/appsync/2017-07-25/paginators-1.json +++ b/botocore/data/appsync/2017-07-25/paginators-1.json @@ -41,6 +41,24 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "types" + }, + "ListDomainNames": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "domainNameConfigs" + }, + "ListSourceApiAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "sourceApiAssociationSummaries" + }, + "ListTypesByAssociation": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "types" } } } diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index a6f9f63e72..184a7c7ab9 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -11,7 +11,8 @@ "serviceId":"AppSync", "signatureVersion":"v4", "signingName":"appsync", - "uid":"appsync-2017-07-25" + "uid":"appsync-2017-07-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateApi":{ diff --git a/botocore/data/apptest/2022-12-06/endpoint-rule-set-1.json b/botocore/data/apptest/2022-12-06/endpoint-rule-set-1.json new file mode 100644 index 0000000000..82abed332e --- /dev/null +++ b/botocore/data/apptest/2022-12-06/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. 
If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + 
"argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], 
+ "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/apptest/2022-12-06/paginators-1.json b/botocore/data/apptest/2022-12-06/paginators-1.json new file mode 100644 index 0000000000..56c715f68d --- /dev/null +++ b/botocore/data/apptest/2022-12-06/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListTestCases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testCases" + }, + "ListTestConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testConfigurations" + }, + "ListTestRunSteps": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testRunSteps" + }, + "ListTestRunTestCases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testRunTestCases" + }, + "ListTestRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testRuns" + }, + "ListTestSuites": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "testSuites" + } + } +} diff --git a/botocore/data/apptest/2022-12-06/service-2.json b/botocore/data/apptest/2022-12-06/service-2.json new file mode 100644 index 0000000000..fe14ff269e --- /dev/null +++ 
b/botocore/data/apptest/2022-12-06/service-2.json @@ -0,0 +1,3510 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-12-06", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"apptest", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS Mainframe Modernization Application Testing", + "serviceId":"AppTest", + "signatureVersion":"v4", + "signingName":"apptest", + "uid":"apptest-2022-12-06" + }, + "operations":{ + "CreateTestCase":{ + "name":"CreateTestCase", + "http":{ + "method":"POST", + "requestUri":"/testcase", + "responseCode":201 + }, + "input":{"shape":"CreateTestCaseRequest"}, + "output":{"shape":"CreateTestCaseResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a test case.

", + "idempotent":true + }, + "CreateTestConfiguration":{ + "name":"CreateTestConfiguration", + "http":{ + "method":"POST", + "requestUri":"/testconfiguration", + "responseCode":201 + }, + "input":{"shape":"CreateTestConfigurationRequest"}, + "output":{"shape":"CreateTestConfigurationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a test configuration.

", + "idempotent":true + }, + "CreateTestSuite":{ + "name":"CreateTestSuite", + "http":{ + "method":"POST", + "requestUri":"/testsuite", + "responseCode":201 + }, + "input":{"shape":"CreateTestSuiteRequest"}, + "output":{"shape":"CreateTestSuiteResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a test suite.

", + "idempotent":true + }, + "DeleteTestCase":{ + "name":"DeleteTestCase", + "http":{ + "method":"DELETE", + "requestUri":"/testcases/{testCaseId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTestCaseRequest"}, + "output":{"shape":"DeleteTestCaseResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a test case.

", + "idempotent":true + }, + "DeleteTestConfiguration":{ + "name":"DeleteTestConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/testconfigurations/{testConfigurationId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTestConfigurationRequest"}, + "output":{"shape":"DeleteTestConfigurationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a test configuration.

", + "idempotent":true + }, + "DeleteTestRun":{ + "name":"DeleteTestRun", + "http":{ + "method":"DELETE", + "requestUri":"/testruns/{testRunId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTestRunRequest"}, + "output":{"shape":"DeleteTestRunResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a test run.

", + "idempotent":true + }, + "DeleteTestSuite":{ + "name":"DeleteTestSuite", + "http":{ + "method":"DELETE", + "requestUri":"/testsuites/{testSuiteId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTestSuiteRequest"}, + "output":{"shape":"DeleteTestSuiteResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a test suite.

", + "idempotent":true + }, + "GetTestCase":{ + "name":"GetTestCase", + "http":{ + "method":"GET", + "requestUri":"/testcases/{testCaseId}", + "responseCode":200 + }, + "input":{"shape":"GetTestCaseRequest"}, + "output":{"shape":"GetTestCaseResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a test case.

" + }, + "GetTestConfiguration":{ + "name":"GetTestConfiguration", + "http":{ + "method":"GET", + "requestUri":"/testconfigurations/{testConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"GetTestConfigurationRequest"}, + "output":{"shape":"GetTestConfigurationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a test configuration.

" + }, + "GetTestRunStep":{ + "name":"GetTestRunStep", + "http":{ + "method":"GET", + "requestUri":"/testruns/{testRunId}/steps/{stepName}", + "responseCode":200 + }, + "input":{"shape":"GetTestRunStepRequest"}, + "output":{"shape":"GetTestRunStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a test run step.

" + }, + "GetTestSuite":{ + "name":"GetTestSuite", + "http":{ + "method":"GET", + "requestUri":"/testsuites/{testSuiteId}", + "responseCode":200 + }, + "input":{"shape":"GetTestSuiteRequest"}, + "output":{"shape":"GetTestSuiteResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a test suite.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists tags for a resource.

" + }, + "ListTestCases":{ + "name":"ListTestCases", + "http":{ + "method":"GET", + "requestUri":"/testcases", + "responseCode":200 + }, + "input":{"shape":"ListTestCasesRequest"}, + "output":{"shape":"ListTestCasesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test cases.

" + }, + "ListTestConfigurations":{ + "name":"ListTestConfigurations", + "http":{ + "method":"GET", + "requestUri":"/testconfigurations", + "responseCode":200 + }, + "input":{"shape":"ListTestConfigurationsRequest"}, + "output":{"shape":"ListTestConfigurationsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test configurations.

" + }, + "ListTestRunSteps":{ + "name":"ListTestRunSteps", + "http":{ + "method":"GET", + "requestUri":"/testruns/{testRunId}/steps", + "responseCode":200 + }, + "input":{"shape":"ListTestRunStepsRequest"}, + "output":{"shape":"ListTestRunStepsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test run steps.

" + }, + "ListTestRunTestCases":{ + "name":"ListTestRunTestCases", + "http":{ + "method":"GET", + "requestUri":"/testruns/{testRunId}/testcases", + "responseCode":200 + }, + "input":{"shape":"ListTestRunTestCasesRequest"}, + "output":{"shape":"ListTestRunTestCasesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test run test cases.

" + }, + "ListTestRuns":{ + "name":"ListTestRuns", + "http":{ + "method":"GET", + "requestUri":"/testruns", + "responseCode":200 + }, + "input":{"shape":"ListTestRunsRequest"}, + "output":{"shape":"ListTestRunsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test runs.

" + }, + "ListTestSuites":{ + "name":"ListTestSuites", + "http":{ + "method":"GET", + "requestUri":"/testsuites", + "responseCode":200 + }, + "input":{"shape":"ListTestSuitesRequest"}, + "output":{"shape":"ListTestSuitesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists test suites.

" + }, + "StartTestRun":{ + "name":"StartTestRun", + "http":{ + "method":"POST", + "requestUri":"/testrun", + "responseCode":200 + }, + "input":{"shape":"StartTestRunRequest"}, + "output":{"shape":"StartTestRunResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts a test run.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Specifies tags of a resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Untags a resource.

", + "idempotent":true + }, + "UpdateTestCase":{ + "name":"UpdateTestCase", + "http":{ + "method":"PATCH", + "requestUri":"/testcases/{testCaseId}", + "responseCode":200 + }, + "input":{"shape":"UpdateTestCaseRequest"}, + "output":{"shape":"UpdateTestCaseResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a test case.

" + }, + "UpdateTestConfiguration":{ + "name":"UpdateTestConfiguration", + "http":{ + "method":"PATCH", + "requestUri":"/testconfigurations/{testConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"UpdateTestConfigurationRequest"}, + "output":{"shape":"UpdateTestConfigurationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a test configuration.

" + }, + "UpdateTestSuite":{ + "name":"UpdateTestSuite", + "http":{ + "method":"PATCH", + "requestUri":"/testsuites/{testSuiteId}", + "responseCode":200 + }, + "input":{"shape":"UpdateTestSuiteRequest"}, + "output":{"shape":"UpdateTestSuiteResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a test suite.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The account or role doesn't have the right permissions to make the request.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Arn":{ + "type":"string", + "pattern":"arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}" + }, + "Batch":{ + "type":"structure", + "required":["batchJobName"], + "members":{ + "batchJobName":{ + "shape":"Variable", + "documentation":"

The job name of the batch.

" + }, + "batchJobParameters":{ + "shape":"BatchJobParameters", + "documentation":"

The batch job parameters of the batch.

" + }, + "exportDataSetNames":{ + "shape":"ExportDataSetNames", + "documentation":"

The export data set names of the batch.

" + } + }, + "documentation":"

Defines a batch.

" + }, + "BatchJobParameters":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "BatchStepInput":{ + "type":"structure", + "required":[ + "resource", + "batchJobName" + ], + "members":{ + "resource":{ + "shape":"MainframeResourceSummary", + "documentation":"

The resource of the batch step input.

" + }, + "batchJobName":{ + "shape":"ResourceName", + "documentation":"

The batch job name of the batch step input.

" + }, + "batchJobParameters":{ + "shape":"BatchJobParameters", + "documentation":"

The batch job parameters of the batch step input.

" + }, + "exportDataSetNames":{ + "shape":"ExportDataSetNames", + "documentation":"

The export data set names of the batch step input.

" + }, + "properties":{ + "shape":"MainframeActionProperties", + "documentation":"

The properties of the batch step input.

" + } + }, + "documentation":"

Defines a batch step input.

" + }, + "BatchStepOutput":{ + "type":"structure", + "members":{ + "dataSetExportLocation":{ + "shape":"S3Uri", + "documentation":"

The data set export location of the batch step output.

" + }, + "dmsOutputLocation":{ + "shape":"S3Uri", + "documentation":"

The Database Migration Service (DMS) output location of the batch step output.

" + }, + "dataSetDetails":{ + "shape":"DataSetList", + "documentation":"

The data set details of the batch step output.

" + } + }, + "documentation":"

Defines a batch step output.

" + }, + "BatchSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"BatchStepInput", + "documentation":"

The step input of the batch summary.

" + }, + "stepOutput":{ + "shape":"BatchStepOutput", + "documentation":"

The step output of the batch summary.

" + } + }, + "documentation":"

Summarizes a batch job.

" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CaptureTool":{ + "type":"string", + "enum":[ + "Precisely", + "AWS DMS" + ] + }, + "CloudFormation":{ + "type":"structure", + "required":["templateLocation"], + "members":{ + "templateLocation":{ + "shape":"S3Uri", + "documentation":"

The template location of the CloudFormation template.

" + }, + "parameters":{ + "shape":"Properties", + "documentation":"

The CloudFormation properties in the CloudFormation template.

" + } + }, + "documentation":"

Specifies the CloudFormation template and its parameters.

" + }, + "CloudFormationAction":{ + "type":"structure", + "required":["resource"], + "members":{ + "resource":{ + "shape":"Variable", + "documentation":"

The resource of the CloudFormation action.

" + }, + "actionType":{ + "shape":"CloudFormationActionType", + "documentation":"

The action type of the CloudFormation action.

" + } + }, + "documentation":"

Specifies the CloudFormation action.

" + }, + "CloudFormationActionType":{ + "type":"string", + "enum":[ + "Create", + "Delete" + ] + }, + "CloudFormationStepSummary":{ + "type":"structure", + "members":{ + "createCloudformation":{ + "shape":"CreateCloudFormationSummary", + "documentation":"

Creates the CloudFormation summary of the step.

" + }, + "deleteCloudformation":{ + "shape":"DeleteCloudFormationSummary", + "documentation":"

Deletes the CloudFormation summary of the CloudFormation step summary.

" + } + }, + "documentation":"

Specifies the CloudFormation step summary.

", + "union":true + }, + "CompareAction":{ + "type":"structure", + "required":["input"], + "members":{ + "input":{ + "shape":"Input", + "documentation":"

The input of the compare action.

" + }, + "output":{ + "shape":"Output", + "documentation":"

The output of the compare action.

" + } + }, + "documentation":"

Compares the action.

" + }, + "CompareActionSummary":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"File", + "documentation":"

The type of the compare action summary.

" + } + }, + "documentation":"

Specifies the compare action summary.

" + }, + "CompareDataSetsStepInput":{ + "type":"structure", + "required":[ + "sourceLocation", + "targetLocation", + "sourceDataSets", + "targetDataSets" + ], + "members":{ + "sourceLocation":{ + "shape":"S3Uri", + "documentation":"

The source location of the compare data sets step input location.

" + }, + "targetLocation":{ + "shape":"S3Uri", + "documentation":"

The target location of the compare data sets step input location.

" + }, + "sourceDataSets":{ + "shape":"DataSetList", + "documentation":"

The source data sets of the compare data sets step input location.

" + }, + "targetDataSets":{ + "shape":"DataSetList", + "documentation":"

The target data sets of the compare data sets step input location.

" + } + }, + "documentation":"

Specifies the compare data sets step input.

" + }, + "CompareDataSetsStepOutput":{ + "type":"structure", + "required":[ + "comparisonOutputLocation", + "comparisonStatus" + ], + "members":{ + "comparisonOutputLocation":{ + "shape":"S3Uri", + "documentation":"

The comparison output location of the compare data sets step output.

" + }, + "comparisonStatus":{ + "shape":"ComparisonStatusEnum", + "documentation":"

The comparison status of the compare data sets step output.

" + } + }, + "documentation":"

Specifies the compare data sets step output.

" + }, + "CompareDataSetsSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"CompareDataSetsStepInput", + "documentation":"

The step input of the compare data sets summary.

" + }, + "stepOutput":{ + "shape":"CompareDataSetsStepOutput", + "documentation":"

The step output of the compare data sets summary.

" + } + }, + "documentation":"

Compares data sets summary.

" + }, + "CompareDatabaseCDCStepInput":{ + "type":"structure", + "required":[ + "sourceLocation", + "targetLocation", + "sourceMetadata", + "targetMetadata" + ], + "members":{ + "sourceLocation":{ + "shape":"String", + "documentation":"

The source location of the compare database CDC step input.

" + }, + "targetLocation":{ + "shape":"String", + "documentation":"

The target location of the compare database CDC step input.

" + }, + "outputLocation":{ + "shape":"String", + "documentation":"

The output location of the compare database CDC step input.

" + }, + "sourceMetadata":{ + "shape":"SourceDatabaseMetadata", + "documentation":"

The source metadata of the compare database CDC step input.

" + }, + "targetMetadata":{ + "shape":"TargetDatabaseMetadata", + "documentation":"

The target metadata location of the compare database CDC step input.

" + } + }, + "documentation":"

Compares the database Change Data Capture (CDC) step input.

" + }, + "CompareDatabaseCDCStepOutput":{ + "type":"structure", + "required":[ + "comparisonOutputLocation", + "comparisonStatus" + ], + "members":{ + "comparisonOutputLocation":{ + "shape":"String", + "documentation":"

The comparison output of the compare database CDC step output.

" + }, + "comparisonStatus":{ + "shape":"ComparisonStatusEnum", + "documentation":"

The comparison status of the compare database CDC step output.

" + } + }, + "documentation":"

Compares the database CDC step output.

" + }, + "CompareDatabaseCDCSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"CompareDatabaseCDCStepInput", + "documentation":"

The step input of the compare database CDC summary.

" + }, + "stepOutput":{ + "shape":"CompareDatabaseCDCStepOutput", + "documentation":"

The step output of the compare database CDC summary.

" + } + }, + "documentation":"

Compares the database CDC summary.

" + }, + "CompareFileType":{ + "type":"structure", + "members":{ + "datasets":{ + "shape":"CompareDataSetsSummary", + "documentation":"

The data sets in the compare file type.

" + }, + "databaseCDC":{ + "shape":"CompareDatabaseCDCSummary", + "documentation":"

The database CDC of the compare file type.

" + } + }, + "documentation":"

Compares the file type.

", + "union":true + }, + "ComparisonStatusEnum":{ + "type":"string", + "enum":[ + "Different", + "Equivalent", + "Equal" + ] + }, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The resource ID of the conflicts with existing resources.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The resource type of the conflicts with existing resources.

" + } + }, + "documentation":"

The parameters provided in the request conflict with existing resources.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateCloudFormationStepInput":{ + "type":"structure", + "required":["templateLocation"], + "members":{ + "templateLocation":{ + "shape":"S3Uri", + "documentation":"

The template location of the CloudFormation step input.

" + }, + "parameters":{ + "shape":"Properties", + "documentation":"

The CloudFormation properties of the CloudFormation step input.

" + } + }, + "documentation":"

Creates the CloudFormation step input.

" + }, + "CreateCloudFormationStepOutput":{ + "type":"structure", + "required":["stackId"], + "members":{ + "stackId":{ + "shape":"String", + "documentation":"

The stack ID of the CloudFormation step output.

" + }, + "exports":{ + "shape":"Properties", + "documentation":"

The exports of the CloudFormation step output.

" + } + }, + "documentation":"

Creates a CloudFormation step output.

" + }, + "CreateCloudFormationSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"CreateCloudFormationStepInput", + "documentation":"

The step input of the CloudFormation summary.

" + }, + "stepOutput":{ + "shape":"CreateCloudFormationStepOutput", + "documentation":"

The step output of the CloudFormation summary.

" + } + }, + "documentation":"

Creates a CloudFormation summary.

" + }, + "CreateTestCaseRequest":{ + "type":"structure", + "required":[ + "name", + "steps" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test case.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test case.

" + }, + "steps":{ + "shape":"StepList", + "documentation":"

The steps in the test case.

" + }, + "clientToken":{ + "shape":"IdempotencyTokenString", + "documentation":"

The client token of the test case.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The specified tags of the test case.

" + } + } + }, + "CreateTestCaseResponse":{ + "type":"structure", + "required":[ + "testCaseId", + "testCaseVersion" + ], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test case.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test case.

" + } + } + }, + "CreateTestConfigurationRequest":{ + "type":"structure", + "required":[ + "name", + "resources" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test configuration.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test configuration.

" + }, + "resources":{ + "shape":"ResourceList", + "documentation":"

The defined resources of the test configuration.

" + }, + "properties":{ + "shape":"Properties", + "documentation":"

The properties of the test configuration.

" + }, + "clientToken":{ + "shape":"IdempotencyTokenString", + "documentation":"

The client token of the test configuration.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test configuration.

" + }, + "serviceSettings":{ + "shape":"ServiceSettings", + "documentation":"

The service settings of the test configuration.

" + } + } + }, + "CreateTestConfigurationResponse":{ + "type":"structure", + "required":[ + "testConfigurationId", + "testConfigurationVersion" + ], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The test configuration ID.

" + }, + "testConfigurationVersion":{ + "shape":"Version", + "documentation":"

The test configuration version.

" + } + } + }, + "CreateTestSuiteRequest":{ + "type":"structure", + "required":[ + "name", + "testCases" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test suite.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test suite.

" + }, + "beforeSteps":{ + "shape":"StepList", + "documentation":"

The before steps of the test suite.

" + }, + "afterSteps":{ + "shape":"StepList", + "documentation":"

The after steps of the test suite.

" + }, + "testCases":{ + "shape":"TestCases", + "documentation":"

The test cases in the test suite.

" + }, + "clientToken":{ + "shape":"IdempotencyTokenString", + "documentation":"

The client token of the test suite.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test suite.

" + } + } + }, + "CreateTestSuiteResponse":{ + "type":"structure", + "required":[ + "testSuiteId", + "testSuiteVersion" + ], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The suite ID of the test suite.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The suite version of the test suite.

" + } + } + }, + "DataSet":{ + "type":"structure", + "required":[ + "type", + "name", + "ccsid", + "format", + "length" + ], + "members":{ + "type":{ + "shape":"DataSetType", + "documentation":"

The type of the data set.

" + }, + "name":{ + "shape":"String100", + "documentation":"

The name of the data set.

" + }, + "ccsid":{ + "shape":"String50", + "documentation":"

The CCSID of the data set.

" + }, + "format":{ + "shape":"Format", + "documentation":"

The format of the data set.

" + }, + "length":{ + "shape":"Integer", + "documentation":"

The length of the data set.

" + } + }, + "documentation":"

Defines a data set.

" + }, + "DataSetList":{ + "type":"list", + "member":{"shape":"DataSet"} + }, + "DataSetType":{ + "type":"string", + "enum":["PS"] + }, + "DatabaseCDC":{ + "type":"structure", + "required":[ + "sourceMetadata", + "targetMetadata" + ], + "members":{ + "sourceMetadata":{ + "shape":"SourceDatabaseMetadata", + "documentation":"

The source metadata of the database CDC.

" + }, + "targetMetadata":{ + "shape":"TargetDatabaseMetadata", + "documentation":"

The target metadata of the database CDC.

" + } + }, + "documentation":"

Defines the Change Data Capture (CDC) of the database.

" + }, + "DeleteCloudFormationStepInput":{ + "type":"structure", + "required":["stackId"], + "members":{ + "stackId":{ + "shape":"String", + "documentation":"

The stack ID of the deleted CloudFormation step input.

" + } + }, + "documentation":"

Deletes the CloudFormation step input.

" + }, + "DeleteCloudFormationStepOutput":{ + "type":"structure", + "members":{ + }, + "documentation":"

Deletes the CloudFormation summary step output.

" + }, + "DeleteCloudFormationSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"DeleteCloudFormationStepInput", + "documentation":"

The step input of the deleted CloudFormation summary.

" + }, + "stepOutput":{ + "shape":"DeleteCloudFormationStepOutput", + "documentation":"

The step output of the deleted CloudFormation summary.

" + } + }, + "documentation":"

Deletes the CloudFormation summary.

" + }, + "DeleteTestCaseRequest":{ + "type":"structure", + "required":["testCaseId"], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test case.

", + "location":"uri", + "locationName":"testCaseId" + } + } + }, + "DeleteTestCaseResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTestConfigurationRequest":{ + "type":"structure", + "required":["testConfigurationId"], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The test ID of the test configuration.

", + "location":"uri", + "locationName":"testConfigurationId" + } + } + }, + "DeleteTestConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTestRunRequest":{ + "type":"structure", + "required":["testRunId"], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The run ID of the test run.

", + "location":"uri", + "locationName":"testRunId" + } + } + }, + "DeleteTestRunResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTestSuiteRequest":{ + "type":"structure", + "required":["testSuiteId"], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test ID of the test suite.

", + "location":"uri", + "locationName":"testSuiteId" + } + } + }, + "DeleteTestSuiteResponse":{ + "type":"structure", + "members":{ + } + }, + "ExportDataSetNames":{ + "type":"list", + "member":{"shape":"String100"} + }, + "File":{ + "type":"structure", + "members":{ + "fileType":{ + "shape":"CompareFileType", + "documentation":"

The file type of the file.

" + } + }, + "documentation":"

Defines a file.

", + "union":true + }, + "FileMetadata":{ + "type":"structure", + "members":{ + "dataSets":{ + "shape":"DataSetList", + "documentation":"

The data sets of the file metadata.

" + }, + "databaseCDC":{ + "shape":"DatabaseCDC", + "documentation":"

The database CDC of the file metadata.

" + } + }, + "documentation":"

Specifies a file metadata.

", + "union":true + }, + "Format":{ + "type":"string", + "enum":[ + "FIXED", + "VARIABLE", + "LINE_SEQUENTIAL" + ] + }, + "GetTestCaseRequest":{ + "type":"structure", + "required":["testCaseId"], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The request test ID of the test case.

", + "location":"uri", + "locationName":"testCaseId" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test case.

", + "location":"querystring", + "locationName":"testCaseVersion" + } + } + }, + "GetTestCaseResponse":{ + "type":"structure", + "required":[ + "testCaseId", + "testCaseArn", + "name", + "latestVersion", + "testCaseVersion", + "status", + "creationTime", + "lastUpdateTime", + "steps" + ], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The response test ID of the test case.

" + }, + "testCaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the test case.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test case.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test case.

" + }, + "latestVersion":{ + "shape":"TestCaseLatestVersion", + "documentation":"

The latest version of the test case.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The case version of the test case.

" + }, + "status":{ + "shape":"TestCaseLifecycle", + "documentation":"

The status of the test case.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test case.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test case.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test case.

" + }, + "steps":{ + "shape":"StepList", + "documentation":"

The steps of the test case.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test case.

" + } + } + }, + "GetTestConfigurationRequest":{ + "type":"structure", + "required":["testConfigurationId"], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The request test configuration ID.

", + "location":"uri", + "locationName":"testConfigurationId" + }, + "testConfigurationVersion":{ + "shape":"Version", + "documentation":"

The test configuration version.

", + "location":"querystring", + "locationName":"testConfigurationVersion" + } + } + }, + "GetTestConfigurationResponse":{ + "type":"structure", + "required":[ + "testConfigurationId", + "name", + "testConfigurationArn", + "latestVersion", + "testConfigurationVersion", + "status", + "creationTime", + "lastUpdateTime", + "resources", + "properties" + ], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The response test configuration ID.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The test configuration name.

" + }, + "testConfigurationArn":{ + "shape":"Arn", + "documentation":"

The test configuration Amazon Resource Name (ARN).

" + }, + "latestVersion":{ + "shape":"TestConfigurationLatestVersion", + "documentation":"

The latest version of the test configuration.

" + }, + "testConfigurationVersion":{ + "shape":"Version", + "documentation":"

The test configuration version.

" + }, + "status":{ + "shape":"TestConfigurationLifecycle", + "documentation":"

The status of the test configuration.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test configuration.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test configuration.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test configuration.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test configuration.

" + }, + "resources":{ + "shape":"ResourceList", + "documentation":"

The resources of the test configuration.

" + }, + "properties":{ + "shape":"Properties", + "documentation":"

The properties of the test configuration.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test configuration.

" + }, + "serviceSettings":{ + "shape":"ServiceSettings", + "documentation":"

The service settings of the test configuration.

" + } + } + }, + "GetTestRunStepRequest":{ + "type":"structure", + "required":[ + "testRunId", + "stepName" + ], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run step.

", + "location":"uri", + "locationName":"testRunId" + }, + "stepName":{ + "shape":"ResourceName", + "documentation":"

The step name of the test run step.

", + "location":"uri", + "locationName":"stepName" + }, + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of a test run step.

", + "location":"querystring", + "locationName":"testCaseId" + }, + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of a test run step.

", + "location":"querystring", + "locationName":"testSuiteId" + } + } + }, + "GetTestRunStepResponse":{ + "type":"structure", + "required":[ + "stepName", + "testRunId", + "status", + "runStartTime" + ], + "members":{ + "stepName":{ + "shape":"ResourceName", + "documentation":"

The step name of the test run step.

" + }, + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run step.

" + }, + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test run step.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test run step.

" + }, + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test run step.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The test suite version of the test run step.

" + }, + "beforeStep":{ + "shape":"Boolean", + "documentation":"

The before steps of the test run step.

" + }, + "afterStep":{ + "shape":"Boolean", + "documentation":"

The after steps of the test run step.

" + }, + "status":{ + "shape":"StepRunStatus", + "documentation":"

The status of the test run step.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test run step.

" + }, + "runStartTime":{ + "shape":"Timestamp", + "documentation":"

The run start time of the test run step.

" + }, + "runEndTime":{ + "shape":"Timestamp", + "documentation":"

The run end time of the test run step.

" + }, + "stepRunSummary":{ + "shape":"StepRunSummary", + "documentation":"

The step run summary of the test run step.

" + } + } + }, + "GetTestSuiteRequest":{ + "type":"structure", + "required":["testSuiteId"], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The ID of the test suite.

", + "location":"uri", + "locationName":"testSuiteId" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The version of the test suite.

", + "location":"querystring", + "locationName":"testSuiteVersion" + } + } + }, + "GetTestSuiteResponse":{ + "type":"structure", + "required":[ + "testSuiteId", + "name", + "latestVersion", + "testSuiteVersion", + "testSuiteArn", + "creationTime", + "lastUpdateTime", + "beforeSteps", + "afterSteps", + "testCases" + ], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The response ID of the test suite.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test suite.

" + }, + "latestVersion":{ + "shape":"TestSuiteLatestVersion", + "documentation":"

The latest version of the test suite.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The version of the test suite.

" + }, + "status":{ + "shape":"TestSuiteLifecycle", + "documentation":"

The status of the test suite.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test suite.

" + }, + "testSuiteArn":{ + "shape":"Arn", + "documentation":"

The test suite Amazon Resource Name (ARN).

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test suite.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test suite.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test suite.

" + }, + "beforeSteps":{ + "shape":"StepList", + "documentation":"

The before steps of the test suite.

" + }, + "afterSteps":{ + "shape":"StepList", + "documentation":"

The after steps of the test suite.

" + }, + "testCases":{ + "shape":"TestCases", + "documentation":"

The test cases of the test suite.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test suite.

" + } + } + }, + "IdempotencyTokenString":{ + "type":"string", + "pattern":"[A-Za-z0-9\\-]{1,64}" + }, + "Identifier":{ + "type":"string", + "pattern":"[A-Za-z0-9:/\\-]{1,100}" + }, + "Input":{ + "type":"structure", + "members":{ + "file":{ + "shape":"InputFile", + "documentation":"

The file in the input.

" + } + }, + "documentation":"

Specifies the input.

", + "union":true + }, + "InputFile":{ + "type":"structure", + "required":[ + "sourceLocation", + "targetLocation", + "fileMetadata" + ], + "members":{ + "sourceLocation":{ + "shape":"Variable", + "documentation":"

The source location of the input file.

" + }, + "targetLocation":{ + "shape":"Variable", + "documentation":"

The target location of the input file.

" + }, + "fileMetadata":{ + "shape":"FileMetadata", + "documentation":"

The file metadata of the input file.

" + } + }, + "documentation":"

Specifies the input file.

" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to retry the query.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

An unexpected error occurred during the processing of the request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the resource.

" + } + } + }, + "ListTestCasesRequest":{ + "type":"structure", + "members":{ + "testCaseIds":{ + "shape":"TestCaseIdList", + "documentation":"

The IDs of the test cases.

", + "location":"querystring", + "locationName":"testCaseIds" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token of the test cases.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum results of the test case.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestCasesResponse":{ + "type":"structure", + "required":["testCases"], + "members":{ + "testCases":{ + "shape":"TestCaseSummaryList", + "documentation":"

The test cases in an application.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token in test cases.

" + } + } + }, + "ListTestConfigurationsRequest":{ + "type":"structure", + "members":{ + "testConfigurationIds":{ + "shape":"TestConfigurationIdList", + "documentation":"

The configuration IDs of the test configurations.

", + "location":"querystring", + "locationName":"testConfigurationIds" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token for the test configurations.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum results of the test configuration.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestConfigurationsResponse":{ + "type":"structure", + "required":["testConfigurations"], + "members":{ + "testConfigurations":{ + "shape":"TestConfigurationList", + "documentation":"

The test configurations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token in the test configurations.

" + } + } + }, + "ListTestRunStepsRequest":{ + "type":"structure", + "required":["testRunId"], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run steps.

", + "location":"uri", + "locationName":"testRunId" + }, + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test run steps.

", + "location":"querystring", + "locationName":"testCaseId" + }, + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test run steps.

", + "location":"querystring", + "locationName":"testSuiteId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of test run steps to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestRunStepsResponse":{ + "type":"structure", + "required":["testRunSteps"], + "members":{ + "testRunSteps":{ + "shape":"TestRunStepSummaryList", + "documentation":"

The test run steps of the response query.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of results.

" + } + } + }, + "ListTestRunTestCasesRequest":{ + "type":"structure", + "required":["testRunId"], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test cases.

", + "location":"uri", + "locationName":"testRunId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of test run test cases to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestRunTestCasesResponse":{ + "type":"structure", + "required":["testRunTestCases"], + "members":{ + "testRunTestCases":{ + "shape":"TestCaseRunSummaryList", + "documentation":"

The test run of the test cases.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of results.

" + } + } + }, + "ListTestRunsRequest":{ + "type":"structure", + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test runs.

", + "location":"querystring", + "locationName":"testSuiteId" + }, + "testRunIds":{ + "shape":"TestRunIdList", + "documentation":"

The test run IDs of the test runs.

", + "location":"querystring", + "locationName":"testrunIds" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from the previous request to retrieve the next page of test run results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of test runs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestRunsResponse":{ + "type":"structure", + "required":["testRuns"], + "members":{ + "testRuns":{ + "shape":"TestRunSummaryList", + "documentation":"

The test runs of the response query.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from the previous request to retrieve the next page of results.

" + } + } + }, + "ListTestSuitesRequest":{ + "type":"structure", + "members":{ + "testSuiteIds":{ + "shape":"TestSuiteIdList", + "documentation":"

The suite ID of the test suites.

", + "location":"querystring", + "locationName":"testSuiteIds" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of test suites to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTestSuitesResponse":{ + "type":"structure", + "required":["testSuites"], + "members":{ + "testSuites":{ + "shape":"TestSuiteList", + "documentation":"

The test suites returned with the response query.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from a previous request to retrieve the next page of test suites results.

" + } + } + }, + "M2ManagedActionProperties":{ + "type":"structure", + "members":{ + "forceStop":{ + "shape":"Boolean", + "documentation":"

Force stops the AWS Mainframe Modernization managed action properties.

" + }, + "importDataSetLocation":{ + "shape":"Variable", + "documentation":"

The import data set location of the AWS Mainframe Modernization managed action properties.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed action properties.

" + }, + "M2ManagedActionType":{ + "type":"string", + "enum":[ + "Configure", + "Deconfigure" + ] + }, + "M2ManagedApplication":{ + "type":"structure", + "required":[ + "applicationId", + "runtime" + ], + "members":{ + "applicationId":{ + "shape":"Variable", + "documentation":"

The application ID of the AWS Mainframe Modernization managed application.

" + }, + "runtime":{ + "shape":"M2ManagedRuntime", + "documentation":"

The runtime of the AWS Mainframe Modernization managed application.

" + }, + "vpcEndpointServiceName":{ + "shape":"Variable", + "documentation":"

The VPC endpoint service name of the AWS Mainframe Modernization managed application.

" + }, + "listenerPort":{ + "shape":"Variable", + "documentation":"

The listener port of the AWS Mainframe Modernization managed application.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application.

" + }, + "M2ManagedApplicationAction":{ + "type":"structure", + "required":[ + "resource", + "actionType" + ], + "members":{ + "resource":{ + "shape":"Variable", + "documentation":"

The resource of the AWS Mainframe Modernization managed application action.

" + }, + "actionType":{ + "shape":"M2ManagedActionType", + "documentation":"

The action type of the AWS Mainframe Modernization managed application action.

" + }, + "properties":{ + "shape":"M2ManagedActionProperties", + "documentation":"

The properties of the AWS Mainframe Modernization managed application action.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application action.

" + }, + "M2ManagedApplicationStepInput":{ + "type":"structure", + "required":[ + "applicationId", + "runtime", + "actionType" + ], + "members":{ + "applicationId":{ + "shape":"String", + "documentation":"

The application ID of the AWS Mainframe Modernization managed application step input.

" + }, + "runtime":{ + "shape":"String", + "documentation":"

The runtime of the AWS Mainframe Modernization managed application step input.

" + }, + "vpcEndpointServiceName":{ + "shape":"String", + "documentation":"

The VPC endpoint service name of the AWS Mainframe Modernization managed application step input.

" + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

The listener port of the AWS Mainframe Modernization managed application step input.

" + }, + "actionType":{ + "shape":"M2ManagedActionType", + "documentation":"

The action type of the AWS Mainframe Modernization managed application step input.

" + }, + "properties":{ + "shape":"M2ManagedActionProperties", + "documentation":"

The properties of the AWS Mainframe Modernization managed application step input.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application step input.

" + }, + "M2ManagedApplicationStepOutput":{ + "type":"structure", + "members":{ + "importDataSetSummary":{ + "shape":"Properties", + "documentation":"

The import data set summary of the AWS Mainframe Modernization managed application step output.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application step output.

" + }, + "M2ManagedApplicationStepSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"M2ManagedApplicationStepInput", + "documentation":"

The step input of the AWS Mainframe Modernization managed application step summary.

" + }, + "stepOutput":{ + "shape":"M2ManagedApplicationStepOutput", + "documentation":"

The step output of the AWS Mainframe Modernization managed application step summary.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application step summary.

" + }, + "M2ManagedApplicationSummary":{ + "type":"structure", + "required":[ + "applicationId", + "runtime" + ], + "members":{ + "applicationId":{ + "shape":"Identifier", + "documentation":"

The application ID of the AWS Mainframe Modernization managed application summary.

" + }, + "runtime":{ + "shape":"M2ManagedRuntime", + "documentation":"

The runtime of the AWS Mainframe Modernization managed application summary.

" + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

The listener port of the AWS Mainframe Modernization managed application summary.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization managed application summary.

" + }, + "M2ManagedRuntime":{ + "type":"string", + "enum":["MicroFocus"] + }, + "M2NonManagedActionType":{ + "type":"string", + "enum":[ + "Configure", + "Deconfigure" + ] + }, + "M2NonManagedApplication":{ + "type":"structure", + "required":[ + "vpcEndpointServiceName", + "listenerPort", + "runtime" + ], + "members":{ + "vpcEndpointServiceName":{ + "shape":"Variable", + "documentation":"

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application.

" + }, + "listenerPort":{ + "shape":"Variable", + "documentation":"

The listener port of the AWS Mainframe Modernization non-managed application.

" + }, + "runtime":{ + "shape":"M2NonManagedRuntime", + "documentation":"

The runtime of the AWS Mainframe Modernization non-managed application.

" + }, + "webAppName":{ + "shape":"Variable", + "documentation":"

The web application name of the AWS Mainframe Modernization non-managed application.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application.

" + }, + "M2NonManagedApplicationAction":{ + "type":"structure", + "required":[ + "resource", + "actionType" + ], + "members":{ + "resource":{ + "shape":"Variable", + "documentation":"

The resource of the AWS Mainframe Modernization non-managed application action.

" + }, + "actionType":{ + "shape":"M2NonManagedActionType", + "documentation":"

The action type of the AWS Mainframe Modernization non-managed application action.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application action.

" + }, + "M2NonManagedApplicationStepInput":{ + "type":"structure", + "required":[ + "vpcEndpointServiceName", + "listenerPort", + "runtime", + "actionType" + ], + "members":{ + "vpcEndpointServiceName":{ + "shape":"String", + "documentation":"

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application step input.

" + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

The listener port of the AWS Mainframe Modernization non-managed application step input.

" + }, + "runtime":{ + "shape":"M2NonManagedRuntime", + "documentation":"

The runtime of the AWS Mainframe Modernization non-managed application step input.

" + }, + "webAppName":{ + "shape":"String", + "documentation":"

The web app name of the AWS Mainframe Modernization non-managed application step input.

" + }, + "actionType":{ + "shape":"M2NonManagedActionType", + "documentation":"

The action type of the AWS Mainframe Modernization non-managed application step input.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application step input.

" + }, + "M2NonManagedApplicationStepOutput":{ + "type":"structure", + "members":{ + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application step output.

" + }, + "M2NonManagedApplicationStepSummary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"M2NonManagedApplicationStepInput", + "documentation":"

The step input of the AWS Mainframe Modernization non-managed application step summary.

" + }, + "stepOutput":{ + "shape":"M2NonManagedApplicationStepOutput", + "documentation":"

The step output of the AWS Mainframe Modernization non-managed application step summary.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application step summary.

" + }, + "M2NonManagedApplicationSummary":{ + "type":"structure", + "required":[ + "vpcEndpointServiceName", + "listenerPort", + "runtime" + ], + "members":{ + "vpcEndpointServiceName":{ + "shape":"String", + "documentation":"

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application summary.

" + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

The listener port of the AWS Mainframe Modernization non-managed application summary.

" + }, + "runtime":{ + "shape":"M2NonManagedRuntime", + "documentation":"

The runtime of the AWS Mainframe Modernization non-managed application summary.

" + }, + "webAppName":{ + "shape":"String", + "documentation":"

The web application name of the AWS Mainframe Modernization non-managed application summary.

" + } + }, + "documentation":"

Specifies the AWS Mainframe Modernization non-managed application summary.

" + }, + "M2NonManagedRuntime":{ + "type":"string", + "enum":["BluAge"] + }, + "MainframeAction":{ + "type":"structure", + "required":[ + "resource", + "actionType" + ], + "members":{ + "resource":{ + "shape":"Variable", + "documentation":"

The resource of the mainframe action.

" + }, + "actionType":{ + "shape":"MainframeActionType", + "documentation":"

The action type of the mainframe action.

" + }, + "properties":{ + "shape":"MainframeActionProperties", + "documentation":"

The properties of the mainframe action.

" + } + }, + "documentation":"

Specifies the mainframe action.

" + }, + "MainframeActionProperties":{ + "type":"structure", + "members":{ + "dmsTaskArn":{ + "shape":"Variable", + "documentation":"

The DMS task ARN of the mainframe action properties.

" + } + }, + "documentation":"

Specifies the mainframe action properties.

" + }, + "MainframeActionSummary":{ + "type":"structure", + "members":{ + "batch":{ + "shape":"BatchSummary", + "documentation":"

The batch of the mainframe action summary.

" + }, + "tn3270":{ + "shape":"TN3270Summary", + "documentation":"

The TN3270 protocol step of the mainframe action summary.

" + } + }, + "documentation":"

Specifies the mainframe action summary.

", + "union":true + }, + "MainframeActionType":{ + "type":"structure", + "members":{ + "batch":{ + "shape":"Batch", + "documentation":"

The batch of the mainframe action type.

" + }, + "tn3270":{ + "shape":"TN3270", + "documentation":"

The TN3270 protocol of the mainframe action type.

" + } + }, + "documentation":"

Specifies the mainframe action type.

", + "union":true + }, + "MainframeResourceSummary":{ + "type":"structure", + "members":{ + "m2ManagedApplication":{ + "shape":"M2ManagedApplicationSummary", + "documentation":"

The AWS Mainframe Modernization managed application in the mainframe resource summary.

" + }, + "m2NonManagedApplication":{ + "shape":"M2NonManagedApplicationSummary", + "documentation":"

The AWS Mainframe Modernization non-managed application in the mainframe resource summary.

" + } + }, + "documentation":"

Specifies the mainframe resource summary.

", + "union":true + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "pattern":"\\S{1,2000}" + }, + "Output":{ + "type":"structure", + "members":{ + "file":{ + "shape":"OutputFile", + "documentation":"

The file of the output.

" + } + }, + "documentation":"

Specifies an output.

", + "union":true + }, + "OutputFile":{ + "type":"structure", + "members":{ + "fileLocation":{ + "shape":"S3Uri", + "documentation":"

The file location of the output file.

" + } + }, + "documentation":"

Specifies an output file.

" + }, + "Properties":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Resource":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" + }, + "type":{ + "shape":"ResourceType", + "documentation":"

The type of the resource.

" + } + }, + "documentation":"

Specifies a resource.

" + }, + "ResourceAction":{ + "type":"structure", + "members":{ + "m2ManagedApplicationAction":{ + "shape":"M2ManagedApplicationAction", + "documentation":"

The AWS Mainframe Modernization managed application action of the resource action.

" + }, + "m2NonManagedApplicationAction":{ + "shape":"M2NonManagedApplicationAction", + "documentation":"

The AWS Mainframe Modernization non-managed application action of the resource action.

" + }, + "cloudFormationAction":{ + "shape":"CloudFormationAction", + "documentation":"

The CloudFormation action of the resource action.

" + } + }, + "documentation":"

Specifies a resource action.

", + "union":true + }, + "ResourceActionSummary":{ + "type":"structure", + "members":{ + "cloudFormation":{ + "shape":"CloudFormationStepSummary", + "documentation":"

The CloudFormation template of the resource action summary.

" + }, + "m2ManagedApplication":{ + "shape":"M2ManagedApplicationStepSummary", + "documentation":"

The AWS Mainframe Modernization managed application of the resource action summary.

" + }, + "m2NonManagedApplication":{ + "shape":"M2NonManagedApplicationStepSummary", + "documentation":"

The AWS Mainframe Modernization non-managed application of the resource action summary.

" + } + }, + "documentation":"

Specifies the resource action summary.

", + "union":true + }, + "ResourceDescription":{ + "type":"string", + "max":1000, + "min":0 + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"Resource"}, + "max":20, + "min":1 + }, + "ResourceName":{ + "type":"string", + "pattern":"[A-Za-z][A-Za-z0-9_\\-]{1,59}" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The resource ID of the resource not found.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The resource type of the resource not found.

" + } + }, + "documentation":"

The specified resource was not found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"structure", + "members":{ + "cloudFormation":{ + "shape":"CloudFormation", + "documentation":"

The CloudFormation template of the resource type.

" + }, + "m2ManagedApplication":{ + "shape":"M2ManagedApplication", + "documentation":"

The AWS Mainframe Modernization managed application of the resource type.

" + }, + "m2NonManagedApplication":{ + "shape":"M2NonManagedApplication", + "documentation":"

The AWS Mainframe Modernization non-managed application of the resource type.

" + } + }, + "documentation":"

Specifies the resource type.

", + "union":true + }, + "S3Uri":{ + "type":"string", + "max":1024, + "min":0 + }, + "Script":{ + "type":"structure", + "required":[ + "scriptLocation", + "type" + ], + "members":{ + "scriptLocation":{ + "shape":"S3Uri", + "documentation":"

The script location of the scripts.

" + }, + "type":{ + "shape":"ScriptType", + "documentation":"

The type of the scripts.

" + } + }, + "documentation":"

Specifies the script.

" + }, + "ScriptSummary":{ + "type":"structure", + "required":[ + "scriptLocation", + "type" + ], + "members":{ + "scriptLocation":{ + "shape":"S3Uri", + "documentation":"

The script location of the script summary.

" + }, + "type":{ + "shape":"ScriptType", + "documentation":"

The type of the script summary.

" + } + }, + "documentation":"

Specifies the scripts summary.

" + }, + "ScriptType":{ + "type":"string", + "enum":["Selenium"] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The resource ID of AWS Application Testing that exceeded the limit.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The resource type of AWS Application Testing that exceeded the limit.

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

The service code of AWS Application Testing that exceeded the limit.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

The quota code of AWS Application Testing that exceeded the limit.

" + } + }, + "documentation":"

One or more quotas for AWS Application Testing exceeds the limit.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "ServiceSettings":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"String", + "documentation":"

The KMS key ID of the service settings.

" + } + }, + "documentation":"

Specifies the service settings.

" + }, + "SourceDatabase":{ + "type":"string", + "enum":["z/OS-DB2"] + }, + "SourceDatabaseMetadata":{ + "type":"structure", + "required":[ + "type", + "captureTool" + ], + "members":{ + "type":{ + "shape":"SourceDatabase", + "documentation":"

The type of the source database metadata.

" + }, + "captureTool":{ + "shape":"CaptureTool", + "documentation":"

The capture tool of the source database metadata.

" + } + }, + "documentation":"

Specifies the source database metadata.

" + }, + "StartTestRunRequest":{ + "type":"structure", + "required":["testSuiteId"], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test run.

" + }, + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The configuration ID of the test run.

" + }, + "clientToken":{ + "shape":"IdempotencyTokenString", + "documentation":"

The client token of the test run.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the test run.

" + } + } + }, + "StartTestRunResponse":{ + "type":"structure", + "required":[ + "testRunId", + "testRunStatus" + ], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run.

" + }, + "testRunStatus":{ + "shape":"TestRunStatus", + "documentation":"

The test run status of the test run.

" + } + } + }, + "Step":{ + "type":"structure", + "required":[ + "name", + "action" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the step.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the step.

" + }, + "action":{ + "shape":"StepAction", + "documentation":"

The action of the step.

" + } + }, + "documentation":"

Defines a step.

" + }, + "StepAction":{ + "type":"structure", + "members":{ + "resourceAction":{ + "shape":"ResourceAction", + "documentation":"

The resource action of the step action.

" + }, + "mainframeAction":{ + "shape":"MainframeAction", + "documentation":"

The mainframe action of the step action.

" + }, + "compareAction":{ + "shape":"CompareAction", + "documentation":"

The compare action of the step action.

" + } + }, + "documentation":"

Specifies a step action.

", + "union":true + }, + "StepList":{ + "type":"list", + "member":{"shape":"Step"}, + "max":20, + "min":1 + }, + "StepRunStatus":{ + "type":"string", + "enum":[ + "Success", + "Failed", + "Running" + ] + }, + "StepRunSummary":{ + "type":"structure", + "members":{ + "mainframeAction":{ + "shape":"MainframeActionSummary", + "documentation":"

The mainframe action of the step run summary.

" + }, + "compareAction":{ + "shape":"CompareActionSummary", + "documentation":"

The compare action of the step run summary.

" + }, + "resourceAction":{ + "shape":"ResourceActionSummary", + "documentation":"

The resource action of the step run summary.

" + } + }, + "documentation":"

Defines the step run summary.

", + "union":true + }, + "String":{"type":"string"}, + "String100":{ + "type":"string", + "pattern":"\\S{1,100}" + }, + "String50":{ + "type":"string", + "pattern":"\\S{1,50}" + }, + "TN3270":{ + "type":"structure", + "required":["script"], + "members":{ + "script":{ + "shape":"Script", + "documentation":"

The script of the TN3270 protocol.

" + }, + "exportDataSetNames":{ + "shape":"ExportDataSetNames", + "documentation":"

The data set names of the TN3270 protocol.

" + } + }, + "documentation":"

Specifies the TN3270 protocol.

" + }, + "TN3270StepInput":{ + "type":"structure", + "required":[ + "resource", + "script" + ], + "members":{ + "resource":{ + "shape":"MainframeResourceSummary", + "documentation":"

The resource of the TN3270 step input.

" + }, + "script":{ + "shape":"ScriptSummary", + "documentation":"

The script of the TN3270 step input.

" + }, + "exportDataSetNames":{ + "shape":"ExportDataSetNames", + "documentation":"

The export data set names of the TN3270 step input.

" + }, + "properties":{ + "shape":"MainframeActionProperties", + "documentation":"

The properties of the TN3270 step input.

" + } + }, + "documentation":"

Specifies a TN3270 step input.

" + }, + "TN3270StepOutput":{ + "type":"structure", + "required":["scriptOutputLocation"], + "members":{ + "dataSetExportLocation":{ + "shape":"S3Uri", + "documentation":"

The data set export location of the TN3270 step output.

" + }, + "dmsOutputLocation":{ + "shape":"S3Uri", + "documentation":"

The DMS output location of the TN3270 step output.

" + }, + "dataSetDetails":{ + "shape":"DataSetList", + "documentation":"

The data set details of the TN3270 step output.

" + }, + "scriptOutputLocation":{ + "shape":"S3Uri", + "documentation":"

The script output location of the TN3270 step output.

" + } + }, + "documentation":"

Specifies a TN3270 step output.

" + }, + "TN3270Summary":{ + "type":"structure", + "required":["stepInput"], + "members":{ + "stepInput":{ + "shape":"TN3270StepInput", + "documentation":"

The step input of the TN3270 summary.

" + }, + "stepOutput":{ + "shape":"TN3270StepOutput", + "documentation":"

The step output of the TN3270 summary.

" + } + }, + "documentation":"

Specifies a TN3270 summary.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!aws:).+" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the tag resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TargetDatabase":{ + "type":"string", + "enum":["PostgreSQL"] + }, + "TargetDatabaseMetadata":{ + "type":"structure", + "required":[ + "type", + "captureTool" + ], + "members":{ + "type":{ + "shape":"TargetDatabase", + "documentation":"

The type of the target database metadata.

" + }, + "captureTool":{ + "shape":"CaptureTool", + "documentation":"

The capture tool of the target database metadata.

" + } + }, + "documentation":"

Specifies a target database metadata.

" + }, + "TestCaseIdList":{ + "type":"list", + "member":{"shape":"Identifier"} + }, + "TestCaseLatestVersion":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"Version", + "documentation":"

The version of the test case latest version.

" + }, + "status":{ + "shape":"TestCaseLifecycle", + "documentation":"

The status of the test case latest version.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test case latest version.

" + } + }, + "documentation":"

Specifies the latest version of a test case.

" + }, + "TestCaseLifecycle":{ + "type":"string", + "enum":[ + "Active", + "Deleting" + ] + }, + "TestCaseList":{ + "type":"list", + "member":{"shape":"Identifier"} + }, + "TestCaseRunStatus":{ + "type":"string", + "enum":[ + "Success", + "Running", + "Failed" + ] + }, + "TestCaseRunSummary":{ + "type":"structure", + "required":[ + "testCaseId", + "testCaseVersion", + "testRunId", + "status", + "runStartTime" + ], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case id of the test case run summary.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test case run summary.

" + }, + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run id of the test case run summary.

" + }, + "status":{ + "shape":"TestCaseRunStatus", + "documentation":"

The status of the test case run summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test case run summary.

" + }, + "runStartTime":{ + "shape":"Timestamp", + "documentation":"

The run start time of the test case run summary.

" + }, + "runEndTime":{ + "shape":"Timestamp", + "documentation":"

The run end time of the test case run summary.

" + } + }, + "documentation":"

Specifies the test case run summary.

" + }, + "TestCaseRunSummaryList":{ + "type":"list", + "member":{"shape":"TestCaseRunSummary"} + }, + "TestCaseSummary":{ + "type":"structure", + "required":[ + "testCaseId", + "testCaseArn", + "name", + "latestVersion", + "status", + "creationTime", + "lastUpdateTime" + ], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test case summary.

" + }, + "testCaseArn":{ + "shape":"Arn", + "documentation":"

The test case Amazon Resource Name (ARN) of the test case summary.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test case summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test case summary.

" + }, + "latestVersion":{ + "shape":"Version", + "documentation":"

The latest version of the test case summary.

" + }, + "status":{ + "shape":"TestCaseLifecycle", + "documentation":"

The status of the test case summary.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test case summary.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test case summary.

" + } + }, + "documentation":"

Specifies a test case summary.

" + }, + "TestCaseSummaryList":{ + "type":"list", + "member":{"shape":"TestCaseSummary"} + }, + "TestCases":{ + "type":"structure", + "members":{ + "sequential":{ + "shape":"TestCaseList", + "documentation":"

The sequential of the test case.

" + } + }, + "documentation":"

Specifies test cases.

", + "union":true + }, + "TestConfigurationIdList":{ + "type":"list", + "member":{"shape":"Identifier"} + }, + "TestConfigurationLatestVersion":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"Version", + "documentation":"

The version of the test configuration latest version.

" + }, + "status":{ + "shape":"TestConfigurationLifecycle", + "documentation":"

The status of the test configuration latest version.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test configuration latest version.

" + } + }, + "documentation":"

Specifies the latest version of the test configuration.

" + }, + "TestConfigurationLifecycle":{ + "type":"string", + "enum":[ + "Active", + "Deleting" + ] + }, + "TestConfigurationList":{ + "type":"list", + "member":{"shape":"TestConfigurationSummary"} + }, + "TestConfigurationSummary":{ + "type":"structure", + "required":[ + "testConfigurationId", + "name", + "latestVersion", + "testConfigurationArn", + "status", + "creationTime", + "lastUpdateTime" + ], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The test configuration ID of the test configuration summary.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test configuration summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test configuration summary.

" + }, + "latestVersion":{ + "shape":"Version", + "documentation":"

The latest version of the test configuration summary.

" + }, + "testConfigurationArn":{ + "shape":"Arn", + "documentation":"

The test configuration ARN of the test configuration summary.

" + }, + "status":{ + "shape":"TestConfigurationLifecycle", + "documentation":"

The status of the test configuration summary.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test configuration summary.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test configuration summary.

" + } + }, + "documentation":"

Specifies a test configuration summary.

" + }, + "TestRunIdList":{ + "type":"list", + "member":{"shape":"Identifier"} + }, + "TestRunStatus":{ + "type":"string", + "enum":[ + "Success", + "Running", + "Failed", + "Deleting" + ] + }, + "TestRunStepSummary":{ + "type":"structure", + "required":[ + "stepName", + "testRunId", + "status", + "runStartTime" + ], + "members":{ + "stepName":{ + "shape":"ResourceName", + "documentation":"

The step name of the test run step summary.

" + }, + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run step summary.

" + }, + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test run step summary.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test run step summary.

" + }, + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test run step summary.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The test suite version of the test run step summary.

" + }, + "beforeStep":{ + "shape":"Boolean", + "documentation":"

The before step of the test run step summary.

" + }, + "afterStep":{ + "shape":"Boolean", + "documentation":"

The after step of the test run step summary.

" + }, + "status":{ + "shape":"StepRunStatus", + "documentation":"

The status of the test run step summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test run step summary.

" + }, + "runStartTime":{ + "shape":"Timestamp", + "documentation":"

The run start time of the test run step summary.

" + }, + "runEndTime":{ + "shape":"Timestamp", + "documentation":"

The run end time of the test run step summary.

" + } + }, + "documentation":"

Specifies a test run step summary.

" + }, + "TestRunStepSummaryList":{ + "type":"list", + "member":{"shape":"TestRunStepSummary"} + }, + "TestRunSummary":{ + "type":"structure", + "required":[ + "testRunId", + "testRunArn", + "testSuiteId", + "testSuiteVersion", + "status", + "runStartTime" + ], + "members":{ + "testRunId":{ + "shape":"Identifier", + "documentation":"

The test run ID of the test run summary.

" + }, + "testRunArn":{ + "shape":"Arn", + "documentation":"

The test run ARN of the test run summary.

" + }, + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test run summary.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The test suite version of the test run summary.

" + }, + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The test configuration ID of the test run summary.

" + }, + "testConfigurationVersion":{ + "shape":"Version", + "documentation":"

The test configuration version of the test run summary.

" + }, + "status":{ + "shape":"TestRunStatus", + "documentation":"

The status of the test run summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test run summary.

" + }, + "runStartTime":{ + "shape":"Timestamp", + "documentation":"

The run start time of the test run summary.

" + }, + "runEndTime":{ + "shape":"Timestamp", + "documentation":"

The run end time of the test run summary.

" + } + }, + "documentation":"

Specifies a test run summary.

" + }, + "TestRunSummaryList":{ + "type":"list", + "member":{"shape":"TestRunSummary"} + }, + "TestSuiteIdList":{ + "type":"list", + "member":{"shape":"Identifier"} + }, + "TestSuiteLatestVersion":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"Version", + "documentation":"

The version of the test suite latest version.

" + }, + "status":{ + "shape":"TestSuiteLifecycle", + "documentation":"

The status of the test suite latest version.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test suite latest version.

" + } + }, + "documentation":"

Specifies the latest version of a test suite.

" + }, + "TestSuiteLifecycle":{ + "type":"string", + "enum":[ + "Creating", + "Updating", + "Active", + "Failed", + "Deleting" + ] + }, + "TestSuiteList":{ + "type":"list", + "member":{"shape":"TestSuiteSummary"} + }, + "TestSuiteSummary":{ + "type":"structure", + "required":[ + "testSuiteId", + "name", + "latestVersion", + "testSuiteArn", + "status", + "creationTime", + "lastUpdateTime" + ], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test suite summary.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the test suite summary.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The status reason of the test suite summary.

" + }, + "latestVersion":{ + "shape":"Version", + "documentation":"

The latest version of the test suite summary.

" + }, + "testSuiteArn":{ + "shape":"Arn", + "documentation":"

The test suite Amazon Resource Name (ARN) of the test suite summary.

" + }, + "status":{ + "shape":"TestSuiteLifecycle", + "documentation":"

The status of the test suite summary.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the test suite summary.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The last update time of the test suite summary.

" + } + }, + "documentation":"

Specifies the test suite summary.

" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

The service code of requests that exceed the limit.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

The quota code of requests that exceed the limit.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to retry after for requests that exceed the limit.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The number of requests made exceeds the limit.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys of the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateTestCaseRequest":{ + "type":"structure", + "required":["testCaseId"], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test case.

", + "location":"uri", + "locationName":"testCaseId" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test case.

" + }, + "steps":{ + "shape":"StepList", + "documentation":"

The steps of the test case.

" + } + } + }, + "UpdateTestCaseResponse":{ + "type":"structure", + "required":[ + "testCaseId", + "testCaseVersion" + ], + "members":{ + "testCaseId":{ + "shape":"Identifier", + "documentation":"

The test case ID of the test case.

" + }, + "testCaseVersion":{ + "shape":"Version", + "documentation":"

The test case version of the test case.

" + } + } + }, + "UpdateTestConfigurationRequest":{ + "type":"structure", + "required":["testConfigurationId"], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The test configuration ID of the test configuration.

", + "location":"uri", + "locationName":"testConfigurationId" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test configuration.

" + }, + "resources":{ + "shape":"ResourceList", + "documentation":"

The resources of the test configuration.

" + }, + "properties":{ + "shape":"Properties", + "documentation":"

The properties of the test configuration.

" + }, + "serviceSettings":{ + "shape":"ServiceSettings", + "documentation":"

The service settings of the test configuration.

" + } + } + }, + "UpdateTestConfigurationResponse":{ + "type":"structure", + "required":[ + "testConfigurationId", + "testConfigurationVersion" + ], + "members":{ + "testConfigurationId":{ + "shape":"Identifier", + "documentation":"

The configuration ID of the test configuration.

" + }, + "testConfigurationVersion":{ + "shape":"Version", + "documentation":"

The configuration version of the test configuration.

" + } + } + }, + "UpdateTestSuiteRequest":{ + "type":"structure", + "required":["testSuiteId"], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test suite.

", + "location":"uri", + "locationName":"testSuiteId" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the test suite.

" + }, + "beforeSteps":{ + "shape":"StepList", + "documentation":"

The before steps for the test suite.

" + }, + "afterSteps":{ + "shape":"StepList", + "documentation":"

The after steps of the test suite.

" + }, + "testCases":{ + "shape":"TestCases", + "documentation":"

The test cases in the test suite.

" + } + } + }, + "UpdateTestSuiteResponse":{ + "type":"structure", + "required":["testSuiteId"], + "members":{ + "testSuiteId":{ + "shape":"Identifier", + "documentation":"

The test suite ID of the test suite.

" + }, + "testSuiteVersion":{ + "shape":"Version", + "documentation":"

The test suite version of the test suite.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

The reason for the validation exception.

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The field list of the validation exception.

" + } + }, + "documentation":"

One or more parameters provided in the request are not valid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the validation exception field.

" + }, + "message":{ + "shape":"String", + "documentation":"

The message stating reason for why service validation failed.

" + } + }, + "documentation":"

Specifies a validation exception field.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "Variable":{ + "type":"string", + "pattern":"\\S{1,1000}" + }, + "Version":{ + "type":"integer", + "box":true + } + }, + "documentation":"

AWS Mainframe Modernization Application Testing provides tools and resources for automated functional equivalence testing for your migration projects.

" +} diff --git a/botocore/data/apptest/2022-12-06/waiters-2.json b/botocore/data/apptest/2022-12-06/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/apptest/2022-12-06/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/arc-zonal-shift/2022-10-30/service-2.json b/botocore/data/arc-zonal-shift/2022-10-30/service-2.json index c04fb3b03b..4e90fa1451 100644 --- a/botocore/data/arc-zonal-shift/2022-10-30/service-2.json +++ b/botocore/data/arc-zonal-shift/2022-10-30/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"arc-zonal-shift", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS ARC - Zonal Shift", "serviceId":"ARC Zonal Shift", "signatureVersion":"v4", "signingName":"arc-zonal-shift", - "uid":"arc-zonal-shift-2022-10-30" + "uid":"arc-zonal-shift-2022-10-30", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelZonalShift":{ @@ -48,7 +50,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting.

For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting.

When a resource has a practice run configuration, Route 53 ARC starts zonal shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.

For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, "DeletePracticeRunConfiguration":{ "name":"DeletePracticeRunConfiguration", @@ -70,6 +72,22 @@ "documentation":"

Deletes the practice run configuration for a resource. Before you can delete a practice run configuration for a resource., you must disable zonal autoshift for the resource. Practice runs must be configured for zonal autoshift to be enabled.

", "idempotent":true }, + "GetAutoshiftObserverNotificationStatus":{ + "name":"GetAutoshiftObserverNotificationStatus", + "http":{ + "method":"GET", + "requestUri":"/autoshift-observer-notification", + "responseCode":200 + }, + "input":{"shape":"GetAutoshiftObserverNotificationStatusRequest"}, + "output":{"shape":"GetAutoshiftObserverNotificationStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns the status of autoshift observer notification. Autoshift observer notification enables you to be notified, through Amazon EventBridge, when there is an autoshift event for zonal autoshift.

If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

For more information, see Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + }, "GetManagedResource":{ "name":"GetManagedResource", "http":{ @@ -103,7 +121,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Returns the active autoshifts for a specified resource.

" + "documentation":"

Returns a list of autoshifts for an Amazon Web Services Region. By default, the call returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return COMPLETED autoshifts.

" }, "ListManagedResources":{ "name":"ListManagedResources", @@ -137,7 +155,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift.

The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see \">ListAutoshifts.

" + "documentation":"

Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift.

The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see \">ListAutoshifts.

" }, "StartZonalShift":{ "name":"StartZonalShift", @@ -158,6 +176,24 @@ ], "documentation":"

You start a zonal shift to temporarily move load balancer traffic away from an Availability Zone in an Amazon Web Services Region, to help your application recover immediately, for example, from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can start a zonal shift in Route 53 ARC only for managed resources in your Amazon Web Services account in an Amazon Web Services Region. Resources are automatically registered with Route 53 ARC by Amazon Web Services services.

At this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

When you start a zonal shift, traffic for the resource is no longer routed to the Availability Zone. The zonal shift is created immediately in Route 53 ARC. However, it can take a short time, typically up to a few minutes, for existing, in-progress connections in the Availability Zone to complete.

For more information, see Zonal shift in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, + "UpdateAutoshiftObserverNotificationStatus":{ + "name":"UpdateAutoshiftObserverNotificationStatus", + "http":{ + "method":"PUT", + "requestUri":"/autoshift-observer-notification", + "responseCode":200 + }, + "input":{"shape":"UpdateAutoshiftObserverNotificationStatusRequest"}, + "output":{"shape":"UpdateAutoshiftObserverNotificationStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Update the status of autoshift observer notification. Autoshift observer notification enables you to be notified, through Amazon EventBridge, when there is an autoshift event for zonal autoshift.

If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

For more information, see Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.

", + "idempotent":true + }, "UpdatePracticeRunConfiguration":{ "name":"UpdatePracticeRunConfiguration", "http":{ @@ -194,7 +230,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

You can update the zonal autoshift status for a resource, to enable or disable zonal autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away resource traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.

", + "documentation":"

The zonal autoshift configuration for a resource includes the practice run configuration and the status for running autoshifts, zonal autoshift status. When a resource has a practice run configuration, Route 53 ARC starts weekly zonal shifts for the resource, to shift traffic away from an Availability Zone. Weekly practice runs help you to make sure that your application can continue to operate normally with the loss of one Availability Zone.

You can update the zonal autoshift status to enable or disable zonal autoshift. When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away resource traffic for an application from an Availability Zone during events, on your behalf, to help reduce time to recovery. Traffic is also shifted away for the required weekly practice runs.

", "idempotent":true }, "UpdateZonalShift":{ @@ -266,11 +302,11 @@ "members":{ "appliedStatus":{ "shape":"AutoshiftAppliedStatus", - "documentation":"

The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one traffic shift active. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, or an autoshift. The appliedStatus field for an autoshift for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an applied status set to APPLIED.

The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run zonal shifts.

For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

The overall principle for precedence is that zonal shifts that you start as a customer take precedence over autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.

" }, "startTime":{ "shape":"StartTime", @@ -279,6 +315,13 @@ }, "documentation":"

A complex structure that lists an autoshift that is currently active for a managed resource and information about the autoshift.

For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, + "AutoshiftObserverNotificationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "AutoshiftSummaries":{ "type":"list", "member":{"shape":"AutoshiftSummary"} @@ -294,7 +337,7 @@ "members":{ "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.

" }, "endTime":{ "shape":"ExpiryTime", @@ -404,14 +447,14 @@ "members":{ "alarmIdentifier":{ "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) for the Amazon CloudWatch alarm that you specify as a control condition for a practice run.

" + "documentation":"

The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run.

" }, "type":{ "shape":"ControlConditionType", - "documentation":"

The type of alarm specified for a practice run. The only valid value is CLOUDWATCH.

" + "documentation":"

The type of alarm specified for a practice run. You can only specify Amazon CloudWatch alarms for practice runs, so the only valid value is CLOUDWATCH.

" } }, - "documentation":"

A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting.

Control condition alarms do not apply for autoshifts.

For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting or to interrupt a practice run in progress.

Control condition alarms do not apply for autoshifts.

For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, "ControlConditionType":{ "type":"string", @@ -448,7 +491,7 @@ }, "resourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

The identifier of the resource to shift away traffic for when a practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" + "documentation":"

The identifier of the resource that Amazon Web Services shifts traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" } } }, @@ -475,7 +518,7 @@ }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", - "documentation":"

The status for zonal autoshift for a resource. When you specify the autoshift status as ENABLED, Amazon Web Services shifts traffic away from shifts away application resource traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.

When you enable zonal autoshift, you must also configure practice runs for the resource.

" + "documentation":"

The status for zonal autoshift for a resource. When you specify ENABLED for the autoshift status, Amazon Web Services shifts away application resource traffic from an Availability Zone, on your behalf, when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers.

When you enable zonal autoshift, you must also configure practice runs for the resource.

" } } }, @@ -520,13 +563,28 @@ "pattern":"^([1-9][0-9]*)(m|h)$" }, "ExpiryTime":{"type":"timestamp"}, + "GetAutoshiftObserverNotificationStatusRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAutoshiftObserverNotificationStatusResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AutoshiftObserverNotificationStatus", + "documentation":"

The status of autoshift observer notification. If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

" + } + } + }, "GetManagedResourceRequest":{ "type":"structure", "required":["resourceIdentifier"], "members":{ "resourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", + "documentation":"

The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", "location":"uri", "locationName":"resourceIdentifier" } @@ -758,7 +816,7 @@ "documentation":"

The outcome alarm for practice runs is an alarm that you specify that ends a practice run when the alarm is in an ALARM state.

" } }, - "documentation":"

A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run.

You can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.

" + "documentation":"

A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, Route 53 ARC shifts traffic for the resource weekly for practice runs.

Practice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.

You can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.

" }, "PracticeRunOutcome":{ "type":"string", @@ -810,7 +868,7 @@ "members":{ "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" }, "comment":{ "shape":"ZonalShiftComment", @@ -822,7 +880,7 @@ }, "resourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" + "documentation":"

The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" } } }, @@ -839,6 +897,26 @@ }, "exception":true }, + "UpdateAutoshiftObserverNotificationStatusRequest":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AutoshiftObserverNotificationStatus", + "documentation":"

The status to set for autoshift observer notification. If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

" + } + } + }, + "UpdateAutoshiftObserverNotificationStatusResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AutoshiftObserverNotificationStatus", + "documentation":"

The status for autoshift observer notification.

" + } + } + }, "UpdatePracticeRunConfigurationRequest":{ "type":"structure", "required":["resourceIdentifier"], @@ -909,7 +987,7 @@ }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", - "documentation":"

The zonal autoshift status for the resource that you want to update the zonal autoshift configuration for.

" + "documentation":"

The zonal autoshift status for the resource that you want to update the zonal autoshift configuration for. Choose ENABLED to authorize Amazon Web Services to shift away resource traffic for an application from an Availability Zone during events, on your behalf, to help reduce time to recovery.

" } } }, @@ -926,7 +1004,7 @@ }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", - "documentation":"

The zonal autoshift status for the resource that you updated the zonal autoshift configuration for.

" + "documentation":"

The updated zonal autoshift status for the resource.

" } } }, @@ -1012,7 +1090,7 @@ "members":{ "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" }, "comment":{ "shape":"ZonalShiftComment", @@ -1020,11 +1098,11 @@ }, "expiryTime":{ "shape":"ExpiryTime", - "documentation":"

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" + "documentation":"

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" }, "resourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" + "documentation":"

The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

" }, "startTime":{ "shape":"StartTime", @@ -1065,19 +1143,19 @@ "members":{ "appliedStatus":{ "shape":"AppliedStatus", - "documentation":"

The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one traffic shift active. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, or an autoshift. The appliedStatus field for an autoshift for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an applied status set to APPLIED.

The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run zonal shifts.

For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

The overall principle for precedence is that zonal shifts that you start as a customer take precedence over autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

" }, "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" }, "comment":{ "shape":"ZonalShiftComment", - "documentation":"

A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

" + "documentation":"

A comment that you enter for a customer-initiated zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

" }, "expiryTime":{ "shape":"ExpiryTime", - "documentation":"

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" + "documentation":"

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" }, "practiceRunOutcome":{ "shape":"PracticeRunOutcome", @@ -1124,7 +1202,7 @@ "members":{ "awayFrom":{ "shape":"AvailabilityZone", - "documentation":"

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" + "documentation":"

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

" }, "comment":{ "shape":"ZonalShiftComment", @@ -1132,7 +1210,7 @@ }, "expiryTime":{ "shape":"ExpiryTime", - "documentation":"

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" + "documentation":"

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

" }, "practiceRunOutcome":{ "shape":"PracticeRunOutcome", @@ -1155,12 +1233,12 @@ "documentation":"

The identifier of a zonal shift.

" } }, - "documentation":"

Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that Route 53 ARC starts on your behalf for practice runs with zonal autoshift.

Zonal shifts are temporary, including customer-started zonal shifts and the zonal autoshift practice run zonal shifts that Route 53 ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A practice run zonal shift has a 30 minute duration.

" + "documentation":"

Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that Route 53 ARC starts on your behalf for practice runs with zonal autoshift.

Zonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that Route 53 ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A practice run zonal shift has a 30 minute duration.

" }, "ZonalShiftsInResource":{ "type":"list", "member":{"shape":"ZonalShiftInResource"} } }, - "documentation":"

Welcome to the Zonal Shift API Reference Guide for Amazon Route 53 Application Recovery Controller (Route 53 ARC).

You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone.

You can also configure zonal autoshift for a load balancer resource. Zonal autoshift is a capability in Route 53 ARC where Amazon Web Services shifts away application resource traffic from an Availability Zone, on your behalf, to help reduce your time to recovery during events. Amazon Web Services shifts away traffic for resources that are enabled for zonal autoshift whenever Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.

To ensure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource out of an Availability Zone. Practice runs make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone.

You must prescale resource capacity in all Availability Zones in the Region where your application is deployed, before you configure practice runs or enable zonal autoshift for a resource. You should not rely on scaling on demand when an autoshift or practice run starts.

For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (Route 53 ARC).

You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone.

You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift is a capability in Route 53 ARC where you authorize Amazon Web Services to shift away application resource traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers.

To help make sure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. Practice runs help you to make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone.

Before you configure practice runs or enable zonal autoshift, we strongly recommend that you prescale your application resource capacity in all Availability Zones in the Region where your application resources are deployed. You should not rely on scaling on demand when an autoshift or practice run starts. Zonal autoshift, including practice runs, works independently, and does not wait for auto scaling actions to complete. Relying on auto scaling, instead of pre-scaling, can result in loss of availability.

If you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure the minimum capacity of your auto scaling to continue operating normally with the loss of an Availability Zone.

Be aware that Route 53 ARC does not inspect the health of individual resources. Amazon Web Services only starts an autoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could potentially impact customers. In some cases, resources might be shifted away that are not experiencing impact.

For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide.

" } diff --git a/botocore/data/artifact/2018-05-10/service-2.json b/botocore/data/artifact/2018-05-10/service-2.json index 690eb4ed9e..21144648ac 100644 --- a/botocore/data/artifact/2018-05-10/service-2.json +++ b/botocore/data/artifact/2018-05-10/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], "endpointPrefix":"artifact", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Artifact", "serviceId":"Artifact", "signatureVersion":"v4", @@ -345,13 +346,13 @@ "ListReportsResponse":{ "type":"structure", "members":{ - "nextToken":{ - "shape":"NextTokenAttribute", - "documentation":"

Pagination token to request the next page of resources.

" - }, "reports":{ "shape":"ReportsList", "documentation":"

List of report resources.

" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

" } } }, @@ -359,7 +360,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^[^<>]*$" + "pattern":"[^<>]*" }, "MaxResultsAttribute":{ "type":"integer", @@ -404,151 +405,155 @@ "ReportDetail":{ "type":"structure", "members":{ - "acceptanceType":{ - "shape":"AcceptanceType", - "documentation":"

Acceptance type for report.

" - }, - "arn":{ - "shape":"LongStringAttribute", - "documentation":"

ARN for the report resource.

" - }, - "category":{ - "shape":"ShortStringAttribute", - "documentation":"

Category for the report resource.

" + "id":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

" }, - "companyName":{ + "name":{ "shape":"ShortStringAttribute", - "documentation":"

Associated company name for the report resource.

" - }, - "createdAt":{ - "shape":"TimestampAttribute", - "documentation":"

Timestamp indicating when the report resource was created.

" - }, - "deletedAt":{ - "shape":"TimestampAttribute", - "documentation":"

Timestamp indicating when the report resource was deleted.

" + "documentation":"

Name for the report resource.

" }, "description":{ "shape":"LongStringAttribute", "documentation":"

Description for the report resource.

" }, - "id":{ - "shape":"ReportId", - "documentation":"

Unique resource ID for the report resource.

" - }, - "lastModifiedAt":{ + "periodStart":{ "shape":"TimestampAttribute", - "documentation":"

Timestamp indicating when the report resource was last modified.

" - }, - "name":{ - "shape":"ShortStringAttribute", - "documentation":"

Name for the report resource.

" + "documentation":"

Timestamp indicating the report resource effective start.

" }, "periodEnd":{ "shape":"TimestampAttribute", "documentation":"

Timestamp indicating the report resource effective end.

" }, - "periodStart":{ + "createdAt":{ "shape":"TimestampAttribute", - "documentation":"

Timestamp indicating the report resource effective start.

" + "documentation":"

Timestamp indicating when the report resource was created.

" }, - "productName":{ - "shape":"ShortStringAttribute", - "documentation":"

Associated product name for the report resource.

" + "lastModifiedAt":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the report resource was last modified.

" }, - "sequenceNumber":{ - "shape":"SequenceNumberAttribute", - "documentation":"

Sequence number to enforce optimistic locking.

" + "deletedAt":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the report resource was deleted.

" + }, + "state":{ + "shape":"PublishedState", + "documentation":"

Current state of the report resource

" + }, + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN for the report resource.

" }, "series":{ "shape":"ShortStringAttribute", "documentation":"

Series for the report resource.

" }, - "state":{ - "shape":"PublishedState", - "documentation":"

Current state of the report resource

" + "category":{ + "shape":"ShortStringAttribute", + "documentation":"

Category for the report resource.

" }, - "statusMessage":{ - "shape":"StatusMessage", - "documentation":"

The message associated with the current upload state.

" + "companyName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated company name for the report resource.

" + }, + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated product name for the report resource.

" }, "termArn":{ "shape":"LongStringAttribute", "documentation":"

Unique resource ARN for term resource.

" }, + "version":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

" + }, + "acceptanceType":{ + "shape":"AcceptanceType", + "documentation":"

Acceptance type for report.

" + }, + "sequenceNumber":{ + "shape":"SequenceNumberAttribute", + "documentation":"

Sequence number to enforce optimistic locking.

" + }, "uploadState":{ "shape":"UploadState", "documentation":"

The current state of the document upload.

" }, - "version":{ - "shape":"VersionAttribute", - "documentation":"

Version for the report resource.

" + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The message associated with the current upload state.

" } }, "documentation":"

Full detail for report resource metadata.

" }, "ReportId":{ "type":"string", - "pattern":"^report-[a-zA-Z0-9]{16}$" + "pattern":"report-[a-zA-Z0-9]{16}" }, "ReportSummary":{ "type":"structure", "members":{ + "id":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

" + }, + "name":{ + "shape":"ShortStringAttribute", + "documentation":"

Name for the report resource.

" + }, + "state":{ + "shape":"PublishedState", + "documentation":"

Current state of the report resource.

" + }, "arn":{ "shape":"LongStringAttribute", "documentation":"

ARN for the report resource.

" }, - "category":{ - "shape":"ShortStringAttribute", - "documentation":"

Category for the report resource.

" + "version":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

" }, - "companyName":{ - "shape":"ShortStringAttribute", - "documentation":"

Associated company name for the report resource.

" + "uploadState":{ + "shape":"UploadState", + "documentation":"

The current state of the document upload.

" }, "description":{ "shape":"LongStringAttribute", "documentation":"

Description for the report resource.

" }, - "id":{ - "shape":"ReportId", - "documentation":"

Unique resource ID for the report resource.

" - }, - "name":{ - "shape":"ShortStringAttribute", - "documentation":"

Name for the report resource.

" + "periodStart":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating the report resource effective start.

" }, "periodEnd":{ "shape":"TimestampAttribute", "documentation":"

Timestamp indicating the report resource effective end.

" }, - "periodStart":{ - "shape":"TimestampAttribute", - "documentation":"

Timestamp indicating the report resource effective start.

" + "series":{ + "shape":"ShortStringAttribute", + "documentation":"

Series for the report resource.

" }, - "productName":{ + "category":{ "shape":"ShortStringAttribute", - "documentation":"

Associated product name for the report resource.

" + "documentation":"

Category for the report resource.

" }, - "series":{ + "companyName":{ "shape":"ShortStringAttribute", - "documentation":"

Series for the report resource.

" + "documentation":"

Associated company name for the report resource.

" }, - "state":{ - "shape":"PublishedState", - "documentation":"

Current state of the report resource.

" + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated product name for the report resource.

" }, "statusMessage":{ "shape":"StatusMessage", "documentation":"

The message associated with the current upload state.

" }, - "uploadState":{ - "shape":"UploadState", - "documentation":"

The current state of the document upload.

" - }, - "version":{ - "shape":"VersionAttribute", - "documentation":"

Version for the report resource.

" + "acceptanceType":{ + "shape":"AcceptanceType", + "documentation":"

Acceptance type for report.

" } }, "documentation":"

Summary for report resource.

" @@ -591,17 +596,13 @@ "type":"structure", "required":[ "message", - "quotaCode", "resourceId", "resourceType", - "serviceCode" + "serviceCode", + "quotaCode" ], "members":{ "message":{"shape":"String"}, - "quotaCode":{ - "shape":"String", - "documentation":"

Code for the affected quota.

" - }, "resourceId":{ "shape":"String", "documentation":"

Identifier of the affected resource.

" @@ -613,6 +614,10 @@ "serviceCode":{ "shape":"String", "documentation":"

Code for the affected service.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

Code for the affected quota.

" } }, "documentation":"

Request would cause a service quota to be exceeded.

", @@ -626,7 +631,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9_\\-\\s]*$" + "pattern":"[a-zA-Z0-9_\\-\\s]*" }, "StatusMessage":{"type":"string"}, "String":{"type":"string"}, @@ -635,6 +640,10 @@ "required":["message"], "members":{ "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

Code for the affected service.

" + }, "quotaCode":{ "shape":"String", "documentation":"

Code for the affected quota.

" @@ -644,10 +653,6 @@ "documentation":"

Number of seconds in which the caller can retry the request.

", "location":"header", "locationName":"Retry-After" - }, - "serviceCode":{ - "shape":"String", - "documentation":"

Code for the affected service.

" } }, "documentation":"

Request was denied due to request throttling.

", @@ -678,14 +683,14 @@ "reason" ], "members":{ - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

The field that caused the error, if applicable.

" - }, "message":{"shape":"String"}, "reason":{ "shape":"ValidationExceptionReason", "documentation":"

Reason the request failed validation.

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The field that caused the error, if applicable.

" } }, "documentation":"

Request fails to satisfy the constraints specified by an AWS service.

", @@ -698,17 +703,17 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "message", - "name" + "name", + "message" ], "members":{ - "message":{ - "shape":"String", - "documentation":"

Message describing why the field failed validation.

" - }, "name":{ "shape":"String", "documentation":"

Name of validation exception.

" + }, + "message":{ + "shape":"String", + "documentation":"

Message describing why the field failed validation.

" } }, "documentation":"

Validation exception message and name.

" diff --git a/botocore/data/artifact/2018-05-10/waiters-2.json b/botocore/data/artifact/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/artifact/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index 0a70cfe039..66375fe17b 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"athena", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Athena", "serviceId":"Athena", "signatureVersion":"v4", "targetPrefix":"AmazonAthena", - "uid":"athena-2017-05-18" + "uid":"athena-2017-05-18", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchGetNamedQuery":{ @@ -462,7 +464,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. Query execution runtime statistics are returned only when QueryExecutionStatus$State is in a SUCCEEDED or FAILED state. Stage-level input and output row count and data size statistics are not shown when a query has row-level filters defined in Lake Formation.

" + "documentation":"

Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. Statistics from the Timeline section of the response object are available as soon as QueryExecutionStatus$State is in a SUCCEEDED or FAILED state. The remaining non-timeline statistics in the response (like stage-level input and output row count and data size) are updated asynchronously and may not be available immediately after a query completes. The non-timeline statistics are also not included when a query has row-level filters defined in Lake Formation.

" }, "GetSession":{ "name":"GetSession", @@ -3453,7 +3455,7 @@ "type":"string", "max":255, "min":1, - "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+" + "pattern":"(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+" }, "NotebookSessionSummary":{ "type":"structure", @@ -3916,7 +3918,7 @@ "members":{ "OutputLocation":{ "shape":"ResultOutputLocation", - "documentation":"

The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. For more information, see Working with query results, recent queries, and output files. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", @@ -3938,7 +3940,7 @@ "members":{ "OutputLocation":{ "shape":"ResultOutputLocation", - "documentation":"

The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. For more information, see Working with query results, recent queries, and output files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "RemoveOutputLocation":{ "shape":"BoxedBoolean", @@ -4850,7 +4852,7 @@ "members":{ "ResultConfiguration":{ "shape":"ResultConfiguration", - "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided. For more information, see Working with query results, recent queries, and output files.

" + "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided.

" }, "EnforceWorkGroupConfiguration":{ "shape":"BoxedBoolean", diff --git a/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json b/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json index b38eb1c9a6..3208bdbf6a 100644 --- a/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json +++ b/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ 
-301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/auditmanager/2017-07-25/service-2.json b/botocore/data/auditmanager/2017-07-25/service-2.json index eb086df6fe..4ad7117fab 100644 --- a/botocore/data/auditmanager/2017-07-25/service-2.json +++ b/botocore/data/auditmanager/2017-07-25/service-2.json @@ -122,7 +122,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, - {"shape":"ServiceQuotaExceededException"} + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} ], "documentation":"

Creates an assessment in Audit Manager.

" }, @@ -567,7 +568,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope.

" + "documentation":"

Gets a list of the Amazon Web Services from which Audit Manager can collect evidence.

Audit Manager defines which Amazon Web Services are in scope for an assessment. Audit Manager infers this scope by examining the assessment’s controls and their data sources, and then mapping this information to one or more of the corresponding Amazon Web Services that are in this list.

For information about why it's no longer possible to specify services in scope manually, see I can't edit the services in scope for my assessment in the Troubleshooting section of the Audit Manager user guide.

" }, "GetSettings":{ "name":"GetSettings", @@ -673,7 +674,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists the latest analytics data for control domains across all of your active assessments.

A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain.

" + "documentation":"

Lists the latest analytics data for control domains across all of your active assessments.

Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference.

A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain.

" }, "ListControlDomainInsightsByAssessment":{ "name":"ListControlDomainInsightsByAssessment", @@ -689,7 +690,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists analytics data for control domains within a specified active assessment.

A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain.

" + "documentation":"

Lists analytics data for control domains within a specified active assessment.

Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference.

A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain.

" }, "ListControlInsightsByControlDomain":{ "name":"ListControlInsightsByControlDomain", @@ -735,7 +736,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of keywords that are pre-mapped to the specified control data source.

" + "documentation":"

Returns a list of keywords that are pre-mapped to the specified control data source.

" }, "ListNotifications":{ "name":"ListNotifications", @@ -858,7 +859,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} ], "documentation":"

Edits an Audit Manager assessment.

" }, @@ -2073,10 +2075,20 @@ "tags":{ "shape":"TagMap", "documentation":"

The tags associated with the control.

" + }, + "state":{ + "shape":"ControlState", + "documentation":"

The state of the control. The END_OF_SUPPORT state is applicable to standard controls only. This state indicates that the standard control can still be used to collect evidence, but Audit Manager is no longer updating or maintaining that control.

" } }, "documentation":"

A control in Audit Manager.

" }, + "ControlCatalogId":{ + "type":"string", + "max":2048, + "min":13, + "pattern":"^arn:.*:controlcatalog:.*|UNCATEGORIZED" + }, "ControlComment":{ "type":"structure", "members":{ @@ -2108,18 +2120,25 @@ "ControlDescription":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true + }, + "ControlDomainId":{ + "type":"string", + "max":2048, + "min":13, + "pattern":"^arn:.*:controlcatalog:.*:.*:domain/.*|UNCATEGORIZED|^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" }, "ControlDomainInsights":{ "type":"structure", "members":{ "name":{ - "shape":"NonEmptyString", + "shape":"String", "documentation":"

The name of the control domain.

" }, "id":{ - "shape":"UUID", - "documentation":"

The unique identifier for the control domain.

" + "shape":"ControlDomainId", + "documentation":"

The unique identifier for the control domain. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference.

" }, "controlsCountByNoncompliantEvidence":{ "shape":"NullableInteger", @@ -2156,11 +2175,11 @@ "type":"structure", "members":{ "name":{ - "shape":"NonEmptyString", + "shape":"String", "documentation":"

The name of the assessment control.

" }, "id":{ - "shape":"UUID", + "shape":"ControlDomainId", "documentation":"

The unique identifier for the assessment control.

" }, "evidenceInsights":{ @@ -2182,11 +2201,11 @@ "type":"structure", "members":{ "name":{ - "shape":"NonEmptyString", + "shape":"String", "documentation":"

The name of the control.

" }, "id":{ - "shape":"UUID", + "shape":"ControlDomainId", "documentation":"

The unique identifier for the control.

" }, "evidenceInsights":{ @@ -2217,11 +2236,11 @@ }, "sourceSetUpOption":{ "shape":"SourceSetUpOption", - "documentation":"

The setup option for the data source. This option reflects if the evidence collection is automated or manual.

" + "documentation":"

The setup option for the data source. This option reflects if the evidence collection method is automated or manual. If you don’t provide a value for sourceSetUpOption, Audit Manager automatically infers and populates the correct value based on the sourceType that you specify.

" }, "sourceType":{ "shape":"SourceType", - "documentation":"

Specifies one of the five data source types for evidence collection.

" + "documentation":"

Specifies which type of data source is used to collect evidence.

  • The source can be an individual data source type, such as AWS_Cloudtrail, AWS_Config, AWS_Security_Hub, AWS_API_Call, or MANUAL.

  • The source can also be a managed grouping of data sources, such as a Core_Control or a Common_Control.

" }, "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ @@ -2340,6 +2359,13 @@ "min":1, "pattern":"^[a-zA-Z_0-9-\\s.,]+$" }, + "ControlState":{ + "type":"string", + "enum":[ + "ACTIVE", + "END_OF_SUPPORT" + ] + }, "ControlStatus":{ "type":"string", "enum":[ @@ -2352,7 +2378,8 @@ "type":"string", "enum":[ "Standard", - "Custom" + "Custom", + "Core" ] }, "Controls":{ @@ -2527,11 +2554,11 @@ }, "sourceSetUpOption":{ "shape":"SourceSetUpOption", - "documentation":"

The setup option for the data source, which reflects if the evidence collection is automated or manual.

" + "documentation":"

The setup option for the data source. This option reflects if the evidence collection method is automated or manual. If you don’t provide a value for sourceSetUpOption, Audit Manager automatically infers and populates the correct value based on the sourceType that you specify.

" }, "sourceType":{ "shape":"SourceType", - "documentation":"

Specifies one of the five types of data sources for evidence collection.

" + "documentation":"

Specifies which type of data source is used to collect evidence.

  • The source can be an individual data source type, such as AWS_Cloudtrail, AWS_Config, AWS_Security_Hub, AWS_API_Call, or MANUAL.

  • The source can also be a managed grouping of data sources, such as a Core_Control or a Common_Control.

" }, "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ @@ -2543,7 +2570,7 @@ "documentation":"

The instructions for troubleshooting the control.

" } }, - "documentation":"

The control mapping fields that represent the source for evidence collection, along with related parameters and metadata. This doesn't contain mappingID.

" + "documentation":"

The mapping attributes that determine the evidence source for a given control, along with related parameters and metadata. This doesn't contain mappingID.

" }, "CreateControlMappingSources":{ "type":"list", @@ -2632,6 +2659,16 @@ "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$", "sensitive":true }, + "DataSourceType":{ + "type":"string", + "enum":[ + "AWS_Cloudtrail", + "AWS_Config", + "AWS_Security_Hub", + "AWS_API_Call", + "MANUAL" + ] + }, "DefaultExportDestination":{ "type":"structure", "members":{ @@ -3848,7 +3885,7 @@ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z_0-9-\\s().]+$" + "pattern":"^[a-zA-Z_0-9-\\s().:\\/]+$" }, "Keywords":{ "type":"list", @@ -3875,8 +3912,8 @@ ], "members":{ "controlDomainId":{ - "shape":"UUID", - "documentation":"

The unique identifier for the control domain.

", + "shape":"ControlDomainId", + "documentation":"

The unique identifier for the control domain.

Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference.

", "location":"querystring", "locationName":"controlDomainId" }, @@ -4129,8 +4166,8 @@ "required":["controlDomainId"], "members":{ "controlDomainId":{ - "shape":"UUID", - "documentation":"

The unique identifier for the control domain.

", + "shape":"ControlDomainId", + "documentation":"

The unique identifier for the control domain.

Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference.

", "location":"querystring", "locationName":"controlDomainId" }, @@ -4167,21 +4204,27 @@ "members":{ "controlType":{ "shape":"ControlType", - "documentation":"

The type of control, such as a standard control or a custom control.

", + "documentation":"

A filter that narrows the list of controls to a specific type.

", "location":"querystring", "locationName":"controlType" }, "nextToken":{ "shape":"Token", - "documentation":"

The pagination token that's used to fetch the next set of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Represents the maximum number of results on a page or for an API request call.

", + "documentation":"

The maximum number of results on a page or for an API request call.

", "location":"querystring", "locationName":"maxResults" + }, + "controlCatalogId":{ + "shape":"ControlCatalogId", + "documentation":"

A filter that narrows the list of controls to a specific resource from the Amazon Web Services Control Catalog.

To use this parameter, specify the ARN of the Control Catalog resource. You can specify either a control domain, a control objective, or a common control. For information about how to find the ARNs for these resources, see ListDomains , ListObjectives , and ListCommonControls .

You can only filter by one Control Catalog resource at a time. Specifying multiple resource ARNs isn’t currently supported. If you want to filter by more than one ARN, we recommend that you run the ListControls operation separately for each ARN.

Alternatively, specify UNCATEGORIZED to list controls that aren't mapped to a Control Catalog resource. For example, this operation might return a list of custom controls that don't belong to any control domain or control objective.

", + "location":"querystring", + "locationName":"controlCatalogId" } } }, @@ -4194,7 +4237,7 @@ }, "nextToken":{ "shape":"Token", - "documentation":"

The pagination token that's used to fetch the next set of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -4203,8 +4246,8 @@ "required":["source"], "members":{ "source":{ - "shape":"SourceType", - "documentation":"

The control mapping data source that the keywords apply to.

", + "shape":"DataSourceType", + "documentation":"

The control mapping data source that the keywords apply to.

", "location":"querystring", "locationName":"source" }, @@ -4227,7 +4270,7 @@ "members":{ "keywords":{ "shape":"Keywords", - "documentation":"

The list of keywords for the event mapping source.

" + "documentation":"

The list of keywords for the control mapping source.

" }, "nextToken":{ "shape":"Token", @@ -4540,10 +4583,12 @@ }, "awsServices":{ "shape":"AWSServices", - "documentation":"

The Amazon Web Services services that are included in the scope of the assessment.

" + "documentation":"

The Amazon Web Services services that are included in the scope of the assessment.

This API parameter is no longer supported. If you use this parameter to specify one or more Amazon Web Services, Audit Manager ignores this input. Instead, the value for awsServices will show as empty.

", + "deprecated":true, + "deprecatedMessage":"You can't specify services in scope when creating/updating an assessment. If you use the parameter to specify one or more AWS services, Audit Manager ignores the input. Instead the value of the parameter will show as empty indicating that the services are defined and managed by Audit Manager." } }, - "documentation":"

The wrapper that contains the Amazon Web Services accounts and services that are in scope for the assessment.

", + "documentation":"

The wrapper that contains the Amazon Web Services accounts that are in scope for the assessment.

You no longer need to specify which Amazon Web Services are in scope when you create or update an assessment. Audit Manager infers the services in scope by examining your assessment controls and their data sources, and then mapping this information to the relevant Amazon Web Services.

If an underlying data source changes for your assessment, we automatically update the services scope as needed to reflect the correct Amazon Web Services. This ensures that your assessment collects accurate and comprehensive evidence about all of the relevant services in your AWS environment.

", "sensitive":true }, "ServiceMetadata":{ @@ -4701,7 +4746,7 @@ }, "SourceName":{ "type":"string", - "max":100, + "max":300, "min":1 }, "SourceSetUpOption":{ @@ -4718,7 +4763,9 @@ "AWS_Config", "AWS_Security_Hub", "AWS_API_Call", - "MANUAL" + "MANUAL", + "Common_Control", + "Core_Control" ] }, "StartAssessmentFrameworkShareRequest":{ @@ -4826,7 +4873,7 @@ "message":{"shape":"String"} }, "documentation":"

The request was denied due to request throttling.

", - "error":{"httpStatusCode":400}, + "error":{"httpStatusCode":429}, "exception":true }, "Timestamp":{"type":"timestamp"}, diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 636cd0fe03..423d48a1ef 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2011-01-01", "endpointPrefix":"autoscaling", "protocol":"query", + "protocols":["query"], "serviceFullName":"Auto Scaling", "serviceId":"Auto Scaling", "signatureVersion":"v4", "uid":"autoscaling-2011-01-01", - "xmlNamespace":"http://autoscaling.amazonaws.com/doc/2011-01-01/" + "xmlNamespace":"http://autoscaling.amazonaws.com/doc/2011-01-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AttachInstances":{ @@ -22,7 +24,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 instances to your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Detach or attach instances in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancerTargetGroups":{ "name":"AttachLoadBalancerTargetGroups", @@ -170,7 +172,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Quotas for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch configurations in the Amazon EC2 Auto Scaling User Guide.

Amazon EC2 Auto Scaling configures instances launched as part of an Auto Scaling group using either a launch template or a launch configuration. We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For information about using launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Quotas for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch configurations in the Amazon EC2 Auto Scaling User Guide.

Amazon EC2 Auto Scaling configures instances launched as part of an Auto Scaling group using either a launch template or a launch configuration. We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For information about using launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -253,7 +255,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a scaling policy in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Delete a scaling policy in the Amazon EC2 Auto Scaling User Guide.

" }, "DeleteScheduledAction":{ "name":"DeleteScheduledAction", @@ -542,7 +544,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Gets information about the scaling activities in the account and Region.

When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verifying a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Gets information about the scaling activities in the account and Region.

When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verify a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeScalingProcessTypes":{ "name":"DescribeScalingProcessTypes", @@ -606,7 +608,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeTrafficSources":{ "name":"DescribeTrafficSources", @@ -657,7 +659,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independently of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach or attach instances in the Amazon EC2 Auto Scaling User Guide.

" }, "DetachLoadBalancerTargetGroups":{ "name":"DetachLoadBalancerTargetGroups", @@ -821,7 +823,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.

" + "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Amazon SNS notification options for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -853,7 +855,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call. If you are no longer using a scheduled action, you can delete it by calling the DeleteScheduledAction API.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

" + "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call. If you are no longer using a scheduled action, you can delete it by calling the DeleteScheduledAction API.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

" }, "PutWarmPool":{ "name":"PutWarmPool", @@ -870,7 +872,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. For more information and example configurations, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration that requests Spot Instances.

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API.

" + "documentation":"

Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity.

This operation must be called from the Region in which the Auto Scaling group was created.

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API.

For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", @@ -899,7 +901,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Resumes the specified suspended auto scaling processes, or all suspended process, for the specified Auto Scaling group.

For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto Scaling group.

For more information, see Suspend and resume Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" }, "RollbackInstanceRefresh":{ "name":"RollbackInstanceRefresh", @@ -931,7 +933,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceHealth":{ "name":"SetInstanceHealth", @@ -943,7 +945,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the health status of the specified instance.

For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the health status of the specified instance.

For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceProtection":{ "name":"SetInstanceProtection", @@ -960,7 +962,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.

" + "documentation":"

Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool.

For more information, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.

" }, "StartInstanceRefresh":{ "name":"StartInstanceRefresh", @@ -991,7 +993,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

" + "documentation":"

Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspend and resume Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

" }, "TerminateInstanceInAutoScalingGroup":{ "name":"TerminateInstanceInAutoScalingGroup", @@ -1008,7 +1010,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "UpdateAutoScalingGroup":{ "name":"UpdateAutoScalingGroup", @@ -1457,7 +1459,7 @@ "documentation":"

The name of the placement group into which to launch your instances, if any.

" }, "VPCZoneIdentifier":{ - "shape":"XmlStringMaxLen2047", + "shape":"XmlStringMaxLen5000", "documentation":"

One or more subnet IDs, if applicable, separated by commas.

" }, "EnabledMetrics":{ @@ -1604,7 +1606,7 @@ }, "LifecycleState":{ "shape":"XmlStringMaxLen32", - "documentation":"

The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running

" + "documentation":"

The lifecycle state for the instance. The Quarantined state is not used. For more information, see Amazon EC2 Auto Scaling instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running

" }, "HealthStatus":{ "shape":"XmlStringMaxLen32", @@ -1903,7 +1905,7 @@ }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

Information used to specify the launch template and version to use to launch instances.

Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId).

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Information used to specify the launch template and version to use to launch instances.

Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId).

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Create a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", @@ -1911,7 +1913,7 @@ }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Creating an Auto Scaling group using an EC2 instance in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Create an Auto Scaling group using parameters from an existing instance in the Amazon EC2 Auto Scaling User Guide.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -1927,7 +1929,7 @@ }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

Only needed if you use simple scaling policies.

The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Default: 300 seconds

" + "documentation":"

Only needed if you use simple scaling policies.

The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Default: 300 seconds

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -1943,7 +1945,7 @@ }, "HealthCheckType":{ "shape":"XmlStringMaxLen32", - "documentation":"

A comma-separated value string of one or more health check types.

The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

Only specify EC2 if you must clear a value that was previously set.

" + "documentation":"

A comma-separated value string of one or more health check types.

The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

Only specify EC2 if you must clear a value that was previously set.

" }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", @@ -1954,16 +1956,16 @@ "documentation":"

The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

" }, "VPCZoneIdentifier":{ - "shape":"XmlStringMaxLen2047", + "shape":"XmlStringMaxLen5000", "documentation":"

A comma-separated list of subnet IDs for a virtual private cloud (VPC) where instances in the Auto Scaling group can be created. If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify must reside in those Availability Zones.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide.

Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias

" + "documentation":"

A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" }, "CapacityRebalance":{ "shape":"CapacityRebalanceEnabled", @@ -1983,7 +1985,7 @@ }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see Replacing Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see Replace Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide.

" }, "Context":{ "shape":"Context", @@ -1991,7 +1993,7 @@ }, "DesiredCapacityType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances.

Valid values: units | vcpu | memory-mib

" + "documentation":"

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances.

Valid values: units | vcpu | memory-mib

" }, "DefaultInstanceWarmup":{ "shape":"DefaultInstanceWarmup", @@ -2017,15 +2019,15 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

If you specify InstanceId, an ImageId is not required.

" + "documentation":"

The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

If you specify InstanceId, an ImageId is not required.

" }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the key pair. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

" }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

A list that contains the security group IDs to assign to the instances in the Auto Scaling group. For more information, see Control traffic to resources using security groups in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

A list that contains the security group IDs to assign to the instances in the Auto Scaling group. For more information, see Control traffic to your Amazon Web Services resources using security groups in the Amazon Virtual Private Cloud User Guide.

" }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", @@ -2041,7 +2043,7 @@ }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Creating a launch configuration using an EC2 instance in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Create a launch configuration in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceType":{ "shape":"XmlStringMaxLen255", @@ -2061,7 +2063,7 @@ }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", - "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure monitoring for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

" }, "SpotPrice":{ "shape":"SpotPrice", @@ -2073,19 +2075,19 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

" + "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

" }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"

Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.

If you specify true, each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this property, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

" + "documentation":"

Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.

If you specify true, each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see Provide network connectivity for your Auto Scaling instances using Amazon VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this property, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", - "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this property to dedicated. For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

Valid values: default | dedicated

" + "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this property to dedicated.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

Valid values: default | dedicated

" }, "MetadataOptions":{ "shape":"InstanceMetadataOptions", - "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The metadata options for the instances. For more information, see Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -2652,7 +2654,7 @@ "members":{ "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information about launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information about launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", @@ -2767,7 +2769,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

Identifies the metrics to disable.

You can specify one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

If you omit this property, all metrics are disabled.

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Identifies the metrics to disable.

You can specify one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

If you omit this property, all metrics are disabled.

For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -2785,7 +2787,7 @@ }, "VolumeType":{ "shape":"BlockDeviceEbsVolumeType", - "documentation":"

The volume type. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide for Linux Instances.

Valid values: standard | io1 | gp2 | st1 | sc1 | gp3

" + "documentation":"

The volume type. For more information, see Amazon EBS volume types in the Amazon EBS User Guide.

Valid values: standard | io1 | gp2 | st1 | sc1 | gp3

" }, "DeleteOnTermination":{ "shape":"BlockDeviceEbsDeleteOnTermination", @@ -2793,11 +2795,11 @@ }, "Iops":{ "shape":"BlockDeviceEbsIops", - "documentation":"

The number of input/output (I/O) operations per second (IOPS) to provision for the volume. For gp3 and io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

Iops is supported when the volume type is gp3 or io1 and required only when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)

" + "documentation":"

The number of input/output (I/O) operations per second (IOPS) to provision for the volume. For gp3 and io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the Amazon Web Services Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

Iops is supported when the volume type is gp3 or io1 and required only when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)

" }, "Encrypted":{ "shape":"BlockDeviceEbsEncrypted", - "documentation":"

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.

If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.

For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Requirements for Amazon EBS encryption in the Amazon EBS User Guide. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.

If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.

For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the Amazon EC2 Auto Scaling User Guide.

" }, "Throughput":{ "shape":"BlockDeviceEbsThroughput", @@ -2820,7 +2822,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

Identifies the metrics to enable.

You can specify one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

If you specify Granularity and don't specify any metrics, all metrics are enabled.

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Identifies the metrics to enable.

You can specify one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

If you specify Granularity and don't specify any metrics, all metrics are enabled.

For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2833,7 +2835,7 @@ "members":{ "Metric":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

One of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

  • WarmPoolDesiredCapacity

  • WarmPoolWarmedCapacity

  • WarmPoolPendingCapacity

  • WarmPoolTerminatingCapacity

  • WarmPoolTotalCapacity

  • GroupAndWarmPoolDesiredCapacity

  • GroupAndWarmPoolTotalCapacity

For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2902,7 +2904,7 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricValue":{ "shape":"MetricScale", @@ -3057,7 +3059,7 @@ }, "LifecycleState":{ "shape":"LifecycleState", - "documentation":"

A description of the current lifecycle state. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

A description of the current lifecycle state. The Quarantined state is not used. For more information, see Amazon EC2 Auto Scaling instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

" }, "HealthStatus":{ "shape":"XmlStringMaxLen32", @@ -3146,7 +3148,7 @@ "documentation":"

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.

" } }, - "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The metadata options for the instances. For more information, see Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceMonitoring":{ "type":"structure", @@ -3394,7 +3396,7 @@ "documentation":"

The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.

You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*.

For example, if you specify c5*, Amazon EC2 Auto Scaling will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 Auto Scaling will allow all the M5a instance types, but not the M5n instance types.

If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.

Default: All instance types

" } }, - "documentation":"

The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

  • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

  • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

For more information, see Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

  • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

  • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceReusePolicy":{ "type":"structure", @@ -3516,11 +3518,11 @@ }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the key pair.

For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The name of the key pair.

For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

" }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

A list that contains the security groups to assign to the instances in the Auto Scaling group. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

A list that contains the security groups to assign to the instances in the Auto Scaling group. For more information, see Control traffic to your Amazon Web Services resources using security groups in the Amazon Virtual Private Cloud User Guide.

" }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", @@ -3548,15 +3550,15 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", - "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure monitoring for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

" }, "SpotPrice":{ "shape":"SpotPrice", - "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances for fault-tolerant and flexible applications in the Amazon EC2 Auto Scaling User Guide.

" }, "IamInstanceProfile":{ "shape":"XmlStringMaxLen1600", @@ -3568,19 +3570,19 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

" }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"

Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. For more information, see Provide network connectivity for your Auto Scaling instances using Amazon VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", - "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

" }, "MetadataOptions":{ "shape":"InstanceMetadataOptions", - "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The metadata options for the instances. For more information, see Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide.

" } }, "documentation":"

Describes a launch configuration.

" @@ -3659,11 +3661,11 @@ "members":{ "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide.

You can specify up to 40 instance types per Auto Scaling group.

" + "documentation":"

The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances.

You can specify up to 40 instance types per Auto Scaling group.

" }, "WeightedCapacity":{ "shape":"XmlStringMaxLen32", - "documentation":"

If you provide a list of instance types to use, you can specify the number of capacity units provided by each instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Configuring instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.

If you specify a value for WeightedCapacity for one instance type, you must specify a value for WeightedCapacity for all of them.

Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.

" + "documentation":"

If you provide a list of instance types to use, you can specify the number of capacity units provided by each instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Configure an Auto Scaling group to use instance weights in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.

If you specify a value for WeightedCapacity for one instance type, you must specify a value for WeightedCapacity for all of them.

Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.

" }, "LaunchTemplateSpecification":{ "shape":"LaunchTemplateSpecification", @@ -3692,7 +3694,7 @@ "documentation":"

The version number, $Latest, or $Default. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" } }, - "documentation":"

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information about launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information about launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleActionResult":{"type":"string"}, "LifecycleActionToken":{ @@ -3780,7 +3782,7 @@ }, "RoleARN":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see Configure a notification target for a lifecycle hook in the Amazon EC2 Auto Scaling User Guide.

Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.

" + "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see Prepare to add a lifecycle hook to your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.

" } }, "documentation":"

Describes information used to specify a lifecycle hook for an Auto Scaling group.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

" @@ -4315,7 +4317,7 @@ }, "MaxCapacityBreachBehavior":{ "shape":"PredictiveScalingMaxCapacityBreachBehavior", - "documentation":"

Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to HonorMaxCapacity if not specified.

The following are possible values:

  • HonorMaxCapacity - Amazon EC2 Auto Scaling cannot scale out capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit.

  • IncreaseMaxCapacity - Amazon EC2 Auto Scaling can scale out capacity higher than the maximum capacity when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for MaxCapacityBuffer.

" + "documentation":"

Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to HonorMaxCapacity if not specified.

The following are possible values:

  • HonorMaxCapacity - Amazon EC2 Auto Scaling can't increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity.

  • IncreaseMaxCapacity - Amazon EC2 Auto Scaling can increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for MaxCapacityBuffer.

Use caution when allowing the maximum capacity to be automatically increased. This can lead to more instances being launched than intended if the increased maximum capacity is not monitored and managed. The increased maximum capacity then becomes the new normal maximum capacity for the Auto Scaling group until you manually update it. The maximum capacity does not automatically decrease back to the original maximum.

" }, "MaxCapacityBuffer":{ "shape":"PredictiveScalingMaxCapacityBuffer", @@ -4485,7 +4487,7 @@ "documentation":"

One of the following processes:

  • Launch

  • Terminate

  • AddToLoadBalancer

  • AlarmNotification

  • AZRebalance

  • HealthCheck

  • InstanceRefresh

  • ReplaceUnhealthy

  • ScheduledActions

" } }, - "documentation":"

Describes a process type.

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a process type.

For more information, see Types of processes in the Amazon EC2 Auto Scaling User Guide.

" }, "Processes":{ "type":"list", @@ -4608,7 +4610,7 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Default: None

" + "documentation":"

A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Default: None

" }, "MetricAggregationType":{ "shape":"XmlStringMaxLen32", @@ -4628,7 +4630,7 @@ }, "Enabled":{ "shape":"ScalingPolicyEnabled", - "documentation":"

Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disabling a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disable a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "PredictiveScalingConfiguration":{ "shape":"PredictiveScalingConfiguration", @@ -4763,7 +4765,7 @@ }, "CheckpointPercentages":{ "shape":"CheckpointPercentages", - "documentation":"

(Optional) Threshold values for each checkpoint in ascending order. Each number must be unique. To replace all instances in the Auto Scaling group, the last number in the array must be 100.

For usage examples, see Adding checkpoints to an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

(Optional) Threshold values for each checkpoint in ascending order. Each number must be unique. To replace all instances in the Auto Scaling group, the last number in the array must be 100.

For usage examples, see Add checkpoints to an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" }, "CheckpointDelay":{ "shape":"CheckpointDelay", @@ -5185,7 +5187,7 @@ }, "ShouldRespectGracePeriod":{ "shape":"ShouldRespectGracePeriod", - "documentation":"

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group.

For more information about the health check grace period, see CreateAutoScalingGroup in the Amazon EC2 Auto Scaling API Reference.

" + "documentation":"

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group.

For more information about the health check grace period, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -5299,7 +5301,7 @@ "documentation":"

The reason that the process was suspended.

" } }, - "documentation":"

Describes an auto scaling process that has been suspended.

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes an auto scaling process that has been suspended.

For more information, see Types of processes in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendedProcesses":{ "type":"list", @@ -5427,7 +5429,7 @@ "required":["Id"], "members":{ "Id":{ - "shape":"XmlStringMaxLen255", + "shape":"XmlStringMaxLen64", "documentation":"

A short name that identifies the object's results in the response. This name must be unique among all TargetTrackingMetricDataQuery objects specified for a single scaling policy. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscores. The first character must be a lowercase letter.

" }, "Expression":{ @@ -5588,7 +5590,7 @@ }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

Only needed if you use simple scaling policies.

The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Only needed if you use simple scaling policies.

The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -5596,7 +5598,7 @@ }, "HealthCheckType":{ "shape":"XmlStringMaxLen32", - "documentation":"

A comma-separated value string of one or more health check types.

The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

Only specify EC2 if you must clear a value that was previously set.

" + "documentation":"

A comma-separated value string of one or more health check types.

The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

Only specify EC2 if you must clear a value that was previously set.

" }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", @@ -5607,16 +5609,16 @@ "documentation":"

The name of an existing placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

" }, "VPCZoneIdentifier":{ - "shape":"XmlStringMaxLen2047", + "shape":"XmlStringMaxLen5000", "documentation":"

A comma-separated list of subnet IDs for a virtual private cloud (VPC). If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify must reside in those Availability Zones.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide.

Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias

" + "documentation":"

A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", @@ -5636,7 +5638,7 @@ }, "DesiredCapacityType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances.

Valid values: units | vcpu | memory-mib

" + "documentation":"

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances.

Valid values: units | vcpu | memory-mib

" }, "DefaultInstanceWarmup":{ "shape":"DefaultInstanceWarmup", @@ -5750,6 +5752,12 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" }, + "XmlStringMaxLen5000":{ + "type":"string", + "max":5000, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, "XmlStringMaxLen511":{ "type":"string", "max":511, @@ -5779,5 +5787,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks.

For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

" + "documentation":"Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks.

For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

" } diff --git a/botocore/data/b2bi/2022-06-23/service-2.json b/botocore/data/b2bi/2022-06-23/service-2.json index 7031d351e9..85839cbfba 100644 --- a/botocore/data/b2bi/2022-06-23/service-2.json +++ b/botocore/data/b2bi/2022-06-23/service-2.json @@ -2,9 +2,11 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-06-23", + "auth":["aws.auth#sigv4"], "endpointPrefix":"b2bi", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"AWS B2BI", "serviceFullName":"AWS B2B Data Interchange", "serviceId":"b2bi", @@ -259,6 +261,12 @@ }, "input":{"shape":"ListCapabilitiesRequest"}, "output":{"shape":"ListCapabilitiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], "documentation":"

Lists the capabilities associated with your Amazon Web Services account for your current or specified region. A trading capability contains the information required to transform incoming EDI documents into JSON or XML outputs.

" }, "ListPartnerships":{ @@ -286,6 +294,12 @@ }, "input":{"shape":"ListProfilesRequest"}, "output":{"shape":"ListProfilesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], "documentation":"

Lists the profiles associated with your Amazon Web Services account for your current or specified region. A profile is the mechanism used to create the concept of a private network.

" }, "ListTagsForResource":{ @@ -311,6 +325,12 @@ }, "input":{"shape":"ListTransformersRequest"}, "output":{"shape":"ListTransformersResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], "documentation":"

Lists the available transformers. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.

" }, "StartTransformerJob":{ @@ -322,6 +342,7 @@ "input":{"shape":"StartTransformerJobRequest"}, "output":{"shape":"StartTransformerJobResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, @@ -655,7 +676,8 @@ "required":[ "profileId", "name", - "email" + "email", + "capabilities" ], "members":{ "profileId":{ diff --git a/botocore/data/backupstorage/2018-04-10/endpoint-rule-set-1.json b/botocore/data/backupstorage/2018-04-10/endpoint-rule-set-1.json deleted file mode 100644 index de1b329f49..0000000000 --- a/botocore/data/backupstorage/2018-04-10/endpoint-rule-set-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://backupstorage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of file diff 
--git a/botocore/data/backupstorage/2018-04-10/paginators-1.json b/botocore/data/backupstorage/2018-04-10/paginators-1.json deleted file mode 100644 index ea142457a6..0000000000 --- a/botocore/data/backupstorage/2018-04-10/paginators-1.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "pagination": {} -} diff --git a/botocore/data/backupstorage/2018-04-10/service-2.json b/botocore/data/backupstorage/2018-04-10/service-2.json deleted file mode 100644 index 57a99fa969..0000000000 --- a/botocore/data/backupstorage/2018-04-10/service-2.json +++ /dev/null @@ -1,924 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2018-04-10", - "endpointPrefix":"backupstorage", - "jsonVersion":"1.1", - "protocol":"rest-json", - "serviceFullName":"AWS Backup Storage", - "serviceId":"BackupStorage", - "signatureVersion":"v4", - "signingName":"backup-storage", - "uid":"backupstorage-2018-04-10" - }, - "operations":{ - "DeleteObject":{ - "name":"DeleteObject", - "http":{ - "method":"DELETE", - "requestUri":"/backup-jobs/{jobId}/object/{objectName}" - }, - "input":{"shape":"DeleteObjectInput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Delete Object from the incremental base Backup." - }, - "GetChunk":{ - "name":"GetChunk", - "http":{ - "method":"GET", - "requestUri":"/restore-jobs/{jobId}/chunk/{chunkToken}" - }, - "input":{"shape":"GetChunkInput"}, - "output":{"shape":"GetChunkOutput"}, - "errors":[ - {"shape":"IllegalArgumentException"}, - {"shape":"RetryableException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceInternalException"}, - {"shape":"ThrottlingException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Gets the specified object's chunk." 
- }, - "GetObjectMetadata":{ - "name":"GetObjectMetadata", - "http":{ - "method":"GET", - "requestUri":"/restore-jobs/{jobId}/object/{objectToken}/metadata" - }, - "input":{"shape":"GetObjectMetadataInput"}, - "output":{"shape":"GetObjectMetadataOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ThrottlingException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Get metadata associated with an Object." - }, - "ListChunks":{ - "name":"ListChunks", - "http":{ - "method":"GET", - "requestUri":"/restore-jobs/{jobId}/chunks/{objectToken}/list" - }, - "input":{"shape":"ListChunksInput"}, - "output":{"shape":"ListChunksOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceInternalException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"List chunks in a given Object" - }, - "ListObjects":{ - "name":"ListObjects", - "http":{ - "method":"GET", - "requestUri":"/restore-jobs/{jobId}/objects/list" - }, - "input":{"shape":"ListObjectsInput"}, - "output":{"shape":"ListObjectsOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"List all Objects in a given Backup." 
- }, - "NotifyObjectComplete":{ - "name":"NotifyObjectComplete", - "http":{ - "method":"PUT", - "requestUri":"/backup-jobs/{jobId}/object/{uploadId}/complete" - }, - "input":{"shape":"NotifyObjectCompleteInput"}, - "output":{"shape":"NotifyObjectCompleteOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"NotReadableInputStreamException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ThrottlingException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Complete upload", - "authtype":"v4-unsigned-body" - }, - "PutChunk":{ - "name":"PutChunk", - "http":{ - "method":"PUT", - "requestUri":"/backup-jobs/{jobId}/chunk/{uploadId}/{chunkIndex}" - }, - "input":{"shape":"PutChunkInput"}, - "output":{"shape":"PutChunkOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"NotReadableInputStreamException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ThrottlingException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Upload chunk.", - "authtype":"v4-unsigned-body" - }, - "PutObject":{ - "name":"PutObject", - "http":{ - "method":"PUT", - "requestUri":"/backup-jobs/{jobId}/object/{objectName}/put-object" - }, - "input":{"shape":"PutObjectInput"}, - "output":{"shape":"PutObjectOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"NotReadableInputStreamException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ThrottlingException"}, - {"shape":"KMSInvalidKeyUsageException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Upload object that can store object metadata String and data blob in single API call using inline chunk field.", - 
"authtype":"v4-unsigned-body" - }, - "StartObject":{ - "name":"StartObject", - "http":{ - "method":"PUT", - "requestUri":"/backup-jobs/{jobId}/object/{objectName}" - }, - "input":{"shape":"StartObjectInput"}, - "output":{"shape":"StartObjectOutput"}, - "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceInternalException"}, - {"shape":"RetryableException"}, - {"shape":"IllegalArgumentException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"DataAlreadyExistsException"}, - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} - ], - "documentation":"Start upload containing one or many chunks." - } - }, - "shapes":{ - "AccessDeniedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"ServiceErrorMessage"} - }, - "error":{"httpStatusCode":403}, - "exception":true, - "synthetic":true - }, - "BackupObject":{ - "type":"structure", - "required":[ - "Name", - "ObjectChecksum", - "ObjectChecksumAlgorithm", - "ObjectToken" - ], - "members":{ - "Name":{ - "shape":"string", - "documentation":"Object name" - }, - "ChunksCount":{ - "shape":"OptionalLong", - "documentation":"Number of chunks in object" - }, - "MetadataString":{ - "shape":"string", - "documentation":"Metadata string associated with the Object" - }, - "ObjectChecksum":{ - "shape":"string", - "documentation":"Object checksum" - }, - "ObjectChecksumAlgorithm":{ - "shape":"SummaryChecksumAlgorithm", - "documentation":"Checksum algorithm" - }, - "ObjectToken":{ - "shape":"string", - "documentation":"Object token" - } - }, - "documentation":"Object" - }, - "Chunk":{ - "type":"structure", - "required":[ - "Index", - "Length", - "Checksum", - "ChecksumAlgorithm", - "ChunkToken" - ], - "members":{ - "Index":{ - "shape":"long", - "documentation":"Chunk index" - }, - "Length":{ - "shape":"long", - "documentation":"Chunk length" - }, - "Checksum":{ - "shape":"string", - "documentation":"Chunk checksum" - }, - "ChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", 
- "documentation":"Checksum algorithm" - }, - "ChunkToken":{ - "shape":"string", - "documentation":"Chunk token" - } - }, - "documentation":"Chunk" - }, - "ChunkList":{ - "type":"list", - "member":{"shape":"Chunk"} - }, - "DataAlreadyExistsException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"}, - "Checksum":{ - "shape":"string", - "documentation":"Data checksum used" - }, - "ChecksumAlgorithm":{ - "shape":"string", - "documentation":"Checksum algorithm used" - } - }, - "documentation":"Non-retryable exception. Attempted to create already existing object or chunk. This message contains a checksum of already presented data.", - "error":{"httpStatusCode":400}, - "exception":true - }, - "DataChecksumAlgorithm":{ - "type":"string", - "enum":["SHA256"] - }, - "DeleteObjectInput":{ - "type":"structure", - "required":[ - "BackupJobId", - "ObjectName" - ], - "members":{ - "BackupJobId":{ - "shape":"string", - "documentation":"Backup job Id for the in-progress backup.", - "location":"uri", - "locationName":"jobId" - }, - "ObjectName":{ - "shape":"string", - "documentation":"The name of the Object.", - "location":"uri", - "locationName":"objectName" - } - } - }, - "GetChunkInput":{ - "type":"structure", - "required":[ - "StorageJobId", - "ChunkToken" - ], - "members":{ - "StorageJobId":{ - "shape":"string", - "documentation":"Storage job id", - "location":"uri", - "locationName":"jobId" - }, - "ChunkToken":{ - "shape":"string", - "documentation":"Chunk token", - "location":"uri", - "locationName":"chunkToken" - } - } - }, - "GetChunkOutput":{ - "type":"structure", - "required":[ - "Data", - "Length", - "Checksum", - "ChecksumAlgorithm" - ], - "members":{ - "Data":{ - "shape":"PayloadBlob", - "documentation":"Chunk data" - }, - "Length":{ - "shape":"long", - "documentation":"Data length", - "location":"header", - "locationName":"x-amz-data-length" - }, - "Checksum":{ - "shape":"string", - "documentation":"Data checksum", - "location":"header", - 
"locationName":"x-amz-checksum" - }, - "ChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Checksum algorithm", - "location":"header", - "locationName":"x-amz-checksum-algorithm" - } - }, - "payload":"Data" - }, - "GetObjectMetadataInput":{ - "type":"structure", - "required":[ - "StorageJobId", - "ObjectToken" - ], - "members":{ - "StorageJobId":{ - "shape":"string", - "documentation":"Backup job id for the in-progress backup.", - "location":"uri", - "locationName":"jobId" - }, - "ObjectToken":{ - "shape":"string", - "documentation":"Object token.", - "location":"uri", - "locationName":"objectToken" - } - } - }, - "GetObjectMetadataOutput":{ - "type":"structure", - "members":{ - "MetadataString":{ - "shape":"string", - "documentation":"Metadata string.", - "location":"header", - "locationName":"x-amz-metadata-string" - }, - "MetadataBlob":{ - "shape":"PayloadBlob", - "documentation":"Metadata blob." - }, - "MetadataBlobLength":{ - "shape":"long", - "documentation":"The size of MetadataBlob.", - "location":"header", - "locationName":"x-amz-data-length" - }, - "MetadataBlobChecksum":{ - "shape":"string", - "documentation":"MetadataBlob checksum.", - "location":"header", - "locationName":"x-amz-checksum" - }, - "MetadataBlobChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Checksum algorithm.", - "location":"header", - "locationName":"x-amz-checksum-algorithm" - } - }, - "payload":"MetadataBlob" - }, - "IllegalArgumentException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Non-retryable exception, indicates client error (wrong argument passed to API). See exception message for details.", - "error":{"httpStatusCode":400}, - "exception":true - }, - "KMSInvalidKeyUsageException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Non-retryable exception. Indicates the KMS key usage is incorrect. 
See exception message for details.", - "error":{"httpStatusCode":400}, - "exception":true - }, - "ListChunksInput":{ - "type":"structure", - "required":[ - "StorageJobId", - "ObjectToken" - ], - "members":{ - "StorageJobId":{ - "shape":"string", - "documentation":"Storage job id", - "location":"uri", - "locationName":"jobId" - }, - "ObjectToken":{ - "shape":"string", - "documentation":"Object token", - "location":"uri", - "locationName":"objectToken" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"Maximum number of chunks", - "location":"querystring", - "locationName":"max-results" - }, - "NextToken":{ - "shape":"string", - "documentation":"Pagination token", - "location":"querystring", - "locationName":"next-token" - } - } - }, - "ListChunksOutput":{ - "type":"structure", - "required":["ChunkList"], - "members":{ - "ChunkList":{ - "shape":"ChunkList", - "documentation":"List of chunks" - }, - "NextToken":{ - "shape":"string", - "documentation":"Pagination token" - } - } - }, - "ListObjectsInput":{ - "type":"structure", - "required":["StorageJobId"], - "members":{ - "StorageJobId":{ - "shape":"string", - "documentation":"Storage job id", - "location":"uri", - "locationName":"jobId" - }, - "StartingObjectName":{ - "shape":"string", - "documentation":"Optional, specifies the starting Object name to list from. Ignored if NextToken is not NULL", - "location":"querystring", - "locationName":"starting-object-name" - }, - "StartingObjectPrefix":{ - "shape":"string", - "documentation":"Optional, specifies the starting Object prefix to list from. 
Ignored if NextToken is not NULL", - "location":"querystring", - "locationName":"starting-object-prefix" - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"Maximum objects count", - "location":"querystring", - "locationName":"max-results" - }, - "NextToken":{ - "shape":"string", - "documentation":"Pagination token", - "location":"querystring", - "locationName":"next-token" - }, - "CreatedBefore":{ - "shape":"timestamp", - "documentation":"(Optional) Created before filter", - "location":"querystring", - "locationName":"created-before" - }, - "CreatedAfter":{ - "shape":"timestamp", - "documentation":"(Optional) Created after filter", - "location":"querystring", - "locationName":"created-after" - } - } - }, - "ListObjectsOutput":{ - "type":"structure", - "required":["ObjectList"], - "members":{ - "ObjectList":{ - "shape":"ObjectList", - "documentation":"Object list" - }, - "NextToken":{ - "shape":"string", - "documentation":"Pagination token" - } - } - }, - "MaxResults":{ - "type":"integer", - "max":100, - "min":1 - }, - "MetadataString":{ - "type":"string", - "pattern":"^.{1,256}$" - }, - "NotReadableInputStreamException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Retryalble exception. 
Indicated issues while reading an input stream due to the networking issues or connection drop on the client side.", - "error":{"httpStatusCode":400}, - "exception":true - }, - "NotifyObjectCompleteInput":{ - "type":"structure", - "required":[ - "BackupJobId", - "UploadId", - "ObjectChecksum", - "ObjectChecksumAlgorithm" - ], - "members":{ - "BackupJobId":{ - "shape":"string", - "documentation":"Backup job Id for the in-progress backup", - "location":"uri", - "locationName":"jobId" - }, - "UploadId":{ - "shape":"string", - "documentation":"Upload Id for the in-progress upload", - "location":"uri", - "locationName":"uploadId" - }, - "ObjectChecksum":{ - "shape":"string", - "documentation":"Object checksum", - "location":"querystring", - "locationName":"checksum" - }, - "ObjectChecksumAlgorithm":{ - "shape":"SummaryChecksumAlgorithm", - "documentation":"Checksum algorithm", - "location":"querystring", - "locationName":"checksum-algorithm" - }, - "MetadataString":{ - "shape":"MetadataString", - "documentation":"Optional metadata associated with an Object. Maximum string length is 256 bytes.", - "location":"querystring", - "locationName":"metadata-string" - }, - "MetadataBlob":{ - "shape":"PayloadBlob", - "documentation":"Optional metadata associated with an Object. Maximum length is 4MB." 
- }, - "MetadataBlobLength":{ - "shape":"long", - "documentation":"The size of MetadataBlob.", - "location":"querystring", - "locationName":"metadata-blob-length" - }, - "MetadataBlobChecksum":{ - "shape":"string", - "documentation":"Checksum of MetadataBlob.", - "location":"querystring", - "locationName":"metadata-checksum" - }, - "MetadataBlobChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Checksum algorithm.", - "location":"querystring", - "locationName":"metadata-checksum-algorithm" - } - }, - "payload":"MetadataBlob" - }, - "NotifyObjectCompleteOutput":{ - "type":"structure", - "required":[ - "ObjectChecksum", - "ObjectChecksumAlgorithm" - ], - "members":{ - "ObjectChecksum":{ - "shape":"string", - "documentation":"Object checksum" - }, - "ObjectChecksumAlgorithm":{ - "shape":"SummaryChecksumAlgorithm", - "documentation":"Checksum algorithm" - } - } - }, - "ObjectList":{ - "type":"list", - "member":{"shape":"BackupObject"} - }, - "OptionalLong":{"type":"long"}, - "PayloadBlob":{ - "type":"blob", - "streaming":true - }, - "PutChunkInput":{ - "type":"structure", - "required":[ - "BackupJobId", - "UploadId", - "ChunkIndex", - "Data", - "Length", - "Checksum", - "ChecksumAlgorithm" - ], - "members":{ - "BackupJobId":{ - "shape":"string", - "documentation":"Backup job Id for the in-progress backup.", - "location":"uri", - "locationName":"jobId" - }, - "UploadId":{ - "shape":"string", - "documentation":"Upload Id for the in-progress upload.", - "location":"uri", - "locationName":"uploadId" - }, - "ChunkIndex":{ - "shape":"long", - "documentation":"Describes this chunk's position relative to the other chunks", - "location":"uri", - "locationName":"chunkIndex" - }, - "Data":{ - "shape":"PayloadBlob", - "documentation":"Data to be uploaded" - }, - "Length":{ - "shape":"long", - "documentation":"Data length", - "location":"querystring", - "locationName":"length" - }, - "Checksum":{ - "shape":"string", - "documentation":"Data checksum", - 
"location":"querystring", - "locationName":"checksum" - }, - "ChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Checksum algorithm", - "location":"querystring", - "locationName":"checksum-algorithm" - } - }, - "payload":"Data" - }, - "PutChunkOutput":{ - "type":"structure", - "required":[ - "ChunkChecksum", - "ChunkChecksumAlgorithm" - ], - "members":{ - "ChunkChecksum":{ - "shape":"string", - "documentation":"Chunk checksum" - }, - "ChunkChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Checksum algorithm" - } - } - }, - "PutObjectInput":{ - "type":"structure", - "required":[ - "BackupJobId", - "ObjectName" - ], - "members":{ - "BackupJobId":{ - "shape":"string", - "documentation":"Backup job Id for the in-progress backup.", - "location":"uri", - "locationName":"jobId" - }, - "ObjectName":{ - "shape":"string", - "documentation":"The name of the Object to be uploaded.", - "location":"uri", - "locationName":"objectName" - }, - "MetadataString":{ - "shape":"string", - "documentation":"Store user defined metadata like backup checksum, disk ids, restore metadata etc.", - "location":"querystring", - "locationName":"metadata-string" - }, - "InlineChunk":{ - "shape":"PayloadBlob", - "documentation":"Inline chunk data to be uploaded." 
- }, - "InlineChunkLength":{ - "shape":"long", - "documentation":"Length of the inline chunk data.", - "location":"querystring", - "locationName":"length" - }, - "InlineChunkChecksum":{ - "shape":"string", - "documentation":"Inline chunk checksum", - "location":"querystring", - "locationName":"checksum" - }, - "InlineChunkChecksumAlgorithm":{ - "shape":"string", - "documentation":"Inline chunk checksum algorithm", - "location":"querystring", - "locationName":"checksum-algorithm" - }, - "ObjectChecksum":{ - "shape":"string", - "documentation":"object checksum", - "location":"querystring", - "locationName":"object-checksum" - }, - "ObjectChecksumAlgorithm":{ - "shape":"SummaryChecksumAlgorithm", - "documentation":"object checksum algorithm", - "location":"querystring", - "locationName":"object-checksum-algorithm" - }, - "ThrowOnDuplicate":{ - "shape":"boolean", - "documentation":"Throw an exception if Object name is already exist.", - "location":"querystring", - "locationName":"throwOnDuplicate" - } - }, - "payload":"InlineChunk" - }, - "PutObjectOutput":{ - "type":"structure", - "required":[ - "InlineChunkChecksum", - "InlineChunkChecksumAlgorithm", - "ObjectChecksum", - "ObjectChecksumAlgorithm" - ], - "members":{ - "InlineChunkChecksum":{ - "shape":"string", - "documentation":"Inline chunk checksum" - }, - "InlineChunkChecksumAlgorithm":{ - "shape":"DataChecksumAlgorithm", - "documentation":"Inline chunk checksum algorithm" - }, - "ObjectChecksum":{ - "shape":"string", - "documentation":"object checksum" - }, - "ObjectChecksumAlgorithm":{ - "shape":"SummaryChecksumAlgorithm", - "documentation":"object checksum algorithm" - } - } - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Non-retryable exception. 
Attempted to make an operation on non-existing or expired resource.", - "error":{"httpStatusCode":404}, - "exception":true - }, - "RetryableException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Retryable exception. In general indicates internal failure that can be fixed by retry.", - "error":{"httpStatusCode":500}, - "exception":true - }, - "ServiceErrorMessage":{"type":"string"}, - "ServiceInternalException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Deprecated. To be removed from the model.", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "ServiceUnavailableException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Retryable exception, indicates internal server error.", - "error":{"httpStatusCode":503}, - "exception":true, - "fault":true - }, - "StartObjectInput":{ - "type":"structure", - "required":[ - "BackupJobId", - "ObjectName" - ], - "members":{ - "BackupJobId":{ - "shape":"string", - "documentation":"Backup job Id for the in-progress backup", - "location":"uri", - "locationName":"jobId" - }, - "ObjectName":{ - "shape":"string", - "documentation":"Name for the object.", - "location":"uri", - "locationName":"objectName" - }, - "ThrowOnDuplicate":{ - "shape":"boolean", - "documentation":"Throw an exception if Object name is already exist." - } - } - }, - "StartObjectOutput":{ - "type":"structure", - "required":["UploadId"], - "members":{ - "UploadId":{ - "shape":"string", - "documentation":"Upload Id for a given upload." - } - } - }, - "SummaryChecksumAlgorithm":{ - "type":"string", - "enum":["SUMMARY"] - }, - "ThrottlingException":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "documentation":"Increased rate over throttling limits. 
Can be retried with exponential backoff.", - "error":{"httpStatusCode":429}, - "exception":true - }, - "boolean":{"type":"boolean"}, - "long":{"type":"long"}, - "string":{"type":"string"}, - "timestamp":{"type":"timestamp"} - }, - "documentation":"The frontend service for Cryo Storage." -} diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 5eabeadf32..1eac45acda 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"batch", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"AWS Batch", "serviceFullName":"AWS Batch", "serviceId":"Batch", "signatureVersion":"v4", - "uid":"batch-2016-08-10" + "uid":"batch-2016-08-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelJob":{ @@ -24,7 +26,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED or PENDING are canceled. A job inRUNNABLE remains in RUNNABLE until it reaches the head of the job queue. Then the job status is updated to FAILED.

A PENDING job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING status.

When you try to cancel an array parent job in PENDING, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed.

Jobs that progressed to the STARTING or RUNNING state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

" + "documentation":"

Cancels a job in an Batch job queue. Jobs that are in a SUBMITTED, PENDING, or RUNNABLE state are cancelled and the job status is updated to FAILED.

A PENDING job is canceled after all dependency jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING status.

When you try to cancel an array parent job in PENDING, Batch attempts to cancel all child jobs. The array parent job is canceled when all child jobs are completed.

Jobs that progressed to the STARTING or RUNNING state aren't canceled. However, the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

" }, "CreateComputeEnvironment":{ "name":"CreateComputeEnvironment", @@ -194,6 +196,20 @@ ], "documentation":"

Describes one or more of your scheduling policies.

" }, + "GetJobQueueSnapshot":{ + "name":"GetJobQueueSnapshot", + "http":{ + "method":"POST", + "requestUri":"/v1/getjobqueuesnapshot" + }, + "input":{"shape":"GetJobQueueSnapshotRequest"}, + "output":{"shape":"GetJobQueueSnapshotResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ], + "documentation":"

Provides a list of the first 100 RUNNABLE jobs associated to a single job queue.

" + }, "ListJobs":{ "name":"ListJobs", "http":{ @@ -665,6 +681,10 @@ "uuid":{ "shape":"String", "documentation":"

Unique identifier for the compute environment.

" + }, + "context":{ + "shape":"String", + "documentation":"

Reserved.

" } }, "documentation":"

An object that represents an Batch compute environment.

" @@ -1160,6 +1180,10 @@ "eksConfiguration":{ "shape":"EksConfiguration", "documentation":"

The details for the Amazon EKS cluster that supports the compute environment.

" + }, + "context":{ + "shape":"String", + "documentation":"

Reserved.

" } }, "documentation":"

Contains the parameters for CreateComputeEnvironment.

" @@ -1751,6 +1775,10 @@ "shape":"EksAttemptContainerDetails", "documentation":"

The details for the init containers.

" }, + "eksClusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EKS cluster.

" + }, "podName":{ "shape":"String", "documentation":"

The name of the pod for this job attempt.

" @@ -2318,6 +2346,57 @@ "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that run on Amazon EC2 resources must not specify this parameter.

" }, "Float":{"type":"float"}, + "FrontOfQueueDetail":{ + "type":"structure", + "members":{ + "jobs":{ + "shape":"FrontOfQueueJobSummaryList", + "documentation":"

The Amazon Resource Names (ARNs) of the first 100 RUNNABLE jobs in a named job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.

" + }, + "lastUpdatedAt":{ + "shape":"Long", + "documentation":"

The Unix timestamp (in milliseconds) for when each of the first 100 RUNNABLE jobs were last updated.

" + } + }, + "documentation":"

Contains a list of the first 100 RUNNABLE jobs associated to a single job queue.

" + }, + "FrontOfQueueJobSummary":{ + "type":"structure", + "members":{ + "jobArn":{ + "shape":"String", + "documentation":"

The ARN for a job in a named job queue.

" + }, + "earliestTimeAtPosition":{ + "shape":"Long", + "documentation":"

The Unix timestamp (in milliseconds) for when the job transitioned to its current position in the job queue.

" + } + }, + "documentation":"

An object that represents summary details for the first 100 RUNNABLE jobs in a job queue.

" + }, + "FrontOfQueueJobSummaryList":{ + "type":"list", + "member":{"shape":"FrontOfQueueJobSummary"} + }, + "GetJobQueueSnapshotRequest":{ + "type":"structure", + "required":["jobQueue"], + "members":{ + "jobQueue":{ + "shape":"String", + "documentation":"

The job queue’s name or full queue Amazon Resource Name (ARN).

" + } + } + }, + "GetJobQueueSnapshotResponse":{ + "type":"structure", + "members":{ + "frontOfQueue":{ + "shape":"FrontOfQueueDetail", + "documentation":"

The list of the first 100 RUNNABLE jobs in each job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.

" + } + } + }, "Host":{ "type":"structure", "members":{ @@ -2923,7 +3002,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by ListJobs in a paginated output. When this parameter is used, ListJobs returns up to maxResults results in a single page and a nextToken response element, if applicable. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value.

The following outlines key parameters and limitations:

  • The minimum value is 1.

  • When --job-status is used, Batch returns up to 1000 values.

  • When --filters is used, Batch returns up to 100 values.

  • If neither parameter is used, then ListJobs returns up to 1000 results (jobs that are in the RUNNING status) and a nextToken value, if applicable.

" }, "nextToken":{ "shape":"String", @@ -3194,6 +3273,10 @@ "instanceTypes":{ "shape":"StringList", "documentation":"

An object that contains the instance types that you want to replace for the existing resources of a job.

" + }, + "eksPropertiesOverride":{ + "shape":"EksPropertiesOverride", + "documentation":"

An object that contains the properties that you want to replace for the existing Amazon EKS resources of a job.

" } }, "documentation":"

The object that represents any node overrides to a job definition that's used in a SubmitJob API operation.

" @@ -3225,6 +3308,10 @@ "ecsProperties":{ "shape":"EcsProperties", "documentation":"

This is an object that represents the properties of the node range for a multi-node parallel job.

" + }, + "eksProperties":{ + "shape":"EksProperties", + "documentation":"

This is an object that represents the properties of the node range for a multi-node parallel job.

" } }, "documentation":"

This is an object that represents the properties of the node range for a multi-node parallel job.

" @@ -4003,6 +4090,10 @@ "updatePolicy":{ "shape":"UpdatePolicy", "documentation":"

Specifies the updated infrastructure update policy for the compute environment. For more information about infrastructure updates, see Updating compute environments in the Batch User Guide.

" + }, + "context":{ + "shape":"String", + "documentation":"

Reserved.

" } }, "documentation":"

Contains the parameters for UpdateComputeEnvironment.

" diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.json b/botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.json index bd57dfbc67..6d64542de3 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.json @@ -4,6 +4,12 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "retrievalResults" + }, + "GetAgentMemory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxItems", + "result_key": "memoryContents" } } } diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json index 5e6c792452..22a8d04af2 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json @@ -12,6 +12,51 @@ "uid":"bedrock-agent-runtime-2023-07-26" }, "operations":{ + "DeleteAgentMemory":{ + "name":"DeleteAgentMemory", + "http":{ + "method":"DELETE", + "requestUri":"/agents/{agentId}/agentAliases/{agentAliasId}/memories", + "responseCode":202 + }, + "input":{"shape":"DeleteAgentMemoryRequest"}, + "output":{"shape":"DeleteAgentMemoryResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Deletes memory from the specified memory identifier.

", + "idempotent":true + }, + "GetAgentMemory":{ + "name":"GetAgentMemory", + "http":{ + "method":"GET", + "requestUri":"/agents/{agentId}/agentAliases/{agentAliasId}/memories", + "responseCode":200 + }, + "input":{"shape":"GetAgentMemoryRequest"}, + "output":{"shape":"GetAgentMemoryResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Gets the sessions stored in the memory of the agent.

" + }, "InvokeAgent":{ "name":"InvokeAgent", "http":{ @@ -32,7 +77,29 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

The CLI doesn't support InvokeAgent.

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

  • To continue the same conversation with an agent, use the same sessionId value in the request.

  • To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

  • End a conversation by setting endSession to true.

  • In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

The response is returned in the bytes field of the chunk object.

  • The attribution object contains citations for parts of the response.

  • If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

  • If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

  • Errors are also surfaced in the response.

" + "documentation":"

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent.

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

  • To continue the same conversation with an agent, use the same sessionId value in the request.

  • To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

  • End a conversation by setting endSession to true.

  • In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

The response is returned in the bytes field of the chunk object.

  • The attribution object contains citations for parts of the response.

  • If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

  • If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

  • Errors are also surfaced in the response.

" + }, + "InvokeFlow":{ + "name":"InvokeFlow", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}", + "responseCode":200 + }, + "input":{"shape":"InvokeFlowRequest"}, + "output":{"shape":"InvokeFlowResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeFlow.

" }, "Retrieve":{ "name":"Retrieve", @@ -103,10 +170,18 @@ "shape":"ApiPath", "documentation":"

The path to the API to call, based off the action group.

" }, + "executionType":{ + "shape":"ExecutionType", + "documentation":"

How fulfillment of the action is handled. For more information, see Handling fulfillment of the action.

" + }, "function":{ "shape":"Function", "documentation":"

The function in the action group to call.

" }, + "invocationId":{ + "shape":"String", + "documentation":"

The unique identifier of the invocation. Only returned if the executionType is RETURN_CONTROL.

" + }, "parameters":{ "shape":"Parameters", "documentation":"

The parameters in the Lambda input event.

" @@ -336,6 +411,24 @@ }, "documentation":"

This property contains the document to chat with, along with its attributes.

" }, + "ByteContentFile":{ + "type":"structure", + "required":[ + "data", + "mediaType" + ], + "members":{ + "data":{ + "shape":"ByteContentBlob", + "documentation":"

The byte value of the file to attach, encoded as Base-64 string. The maximum size of all files that is attached is 10MB. You can attach a maximum of 5 files.

" + }, + "mediaType":{ + "shape":"MimeType", + "documentation":"

The MIME type of data contained in the file used for chat.

" + } + }, + "documentation":"

The property contains the file to chat with, along with its attributes.

" + }, "Citation":{ "type":"structure", "members":{ @@ -354,6 +447,42 @@ "type":"list", "member":{"shape":"Citation"} }, + "CodeInterpreterInvocationInput":{ + "type":"structure", + "members":{ + "code":{ + "shape":"String", + "documentation":"

The code for the code interpreter to use.

" + }, + "files":{ + "shape":"Files", + "documentation":"

Files that are uploaded for code interpreter to use.

" + } + }, + "documentation":"

Contains information about the code interpreter being invoked.

" + }, + "CodeInterpreterInvocationOutput":{ + "type":"structure", + "members":{ + "executionError":{ + "shape":"String", + "documentation":"

Contains the error returned from code execution.

" + }, + "executionOutput":{ + "shape":"String", + "documentation":"

Contains the successful output returned from code execution

" + }, + "executionTimeout":{ + "shape":"Boolean", + "documentation":"

Indicates if the execution of the code timed out.

" + }, + "files":{ + "shape":"Files", + "documentation":"

Contains output files, if generated by code execution.

" + } + }, + "documentation":"

Contains the JSON-formatted string returned by the API invoked by the code interpreter.

" + }, "ConflictException":{ "type":"structure", "members":{ @@ -392,6 +521,43 @@ "OVERRIDDEN" ] }, + "DateTimestamp":{ + "type":"timestamp", + "documentation":"

Time Stamp.

", + "timestampFormat":"iso8601" + }, + "DeleteAgentMemoryRequest":{ + "type":"structure", + "required":[ + "agentAliasId", + "agentId" + ], + "members":{ + "agentAliasId":{ + "shape":"AgentAliasId", + "documentation":"

The unique identifier of an alias of an agent.

", + "location":"uri", + "locationName":"agentAliasId" + }, + "agentId":{ + "shape":"AgentId", + "documentation":"

The unique identifier of the agent to which the alias belongs.

", + "location":"uri", + "locationName":"agentId" + }, + "memoryId":{ + "shape":"MemoryId", + "documentation":"

The unique identifier of the memory.

", + "location":"querystring", + "locationName":"memoryId" + } + } + }, + "DeleteAgentMemoryResponse":{ + "type":"structure", + "members":{ + } + }, "DependencyFailedException":{ "type":"structure", "members":{ @@ -408,10 +574,23 @@ }, "exception":true }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, "Double":{ "type":"double", "box":true }, + "ExecutionType":{ + "type":"string", + "enum":[ + "LAMBDA", + "RETURN_CONTROL" + ] + }, "ExternalSource":{ "type":"structure", "required":["sourceType"], @@ -507,6 +686,60 @@ "documentation":"

Contains information about the failure of the interaction.

", "sensitive":true }, + "FileBody":{ + "type":"blob", + "max":1000000, + "min":0, + "sensitive":true + }, + "FilePart":{ + "type":"structure", + "members":{ + "files":{ + "shape":"OutputFiles", + "documentation":"

Files containing intermediate response for the user.

" + } + }, + "documentation":"

Contains intermediate response for code interpreter if any files have been generated.

", + "event":true + }, + "FileSource":{ + "type":"structure", + "required":["sourceType"], + "members":{ + "byteContent":{ + "shape":"ByteContentFile", + "documentation":"

The data and the text of the attached files.

" + }, + "s3Location":{ + "shape":"S3ObjectFile", + "documentation":"

The s3 location of the files to attach.

" + }, + "sourceType":{ + "shape":"FileSourceType", + "documentation":"

The source type of the files to attach.

" + } + }, + "documentation":"

The source file of the content contained in the wrapper object.

" + }, + "FileSourceType":{ + "type":"string", + "enum":[ + "S3", + "BYTE_CONTENT" + ] + }, + "FileUseCase":{ + "type":"string", + "enum":[ + "CODE_INTERPRETER", + "CHAT" + ] + }, + "Files":{ + "type":"list", + "member":{"shape":"String"} + }, "FilterAttribute":{ "type":"structure", "required":[ @@ -550,6 +783,163 @@ "type":"string", "sensitive":true }, + "FlowAliasIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$" + }, + "FlowCompletionEvent":{ + "type":"structure", + "required":["completionReason"], + "members":{ + "completionReason":{ + "shape":"FlowCompletionReason", + "documentation":"

The reason that the flow completed.

" + } + }, + "documentation":"

Contains information about why a flow completed.

This data type is used in the following API operations:

", + "event":true, + "sensitive":true + }, + "FlowCompletionReason":{ + "type":"string", + "enum":["SUCCESS"] + }, + "FlowIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$" + }, + "FlowInput":{ + "type":"structure", + "required":[ + "content", + "nodeName", + "nodeOutputName" + ], + "members":{ + "content":{ + "shape":"FlowInputContent", + "documentation":"

Contains information about an input into the prompt flow.

" + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

The name of the flow input node that begins the prompt flow.

" + }, + "nodeOutputName":{ + "shape":"NodeOutputName", + "documentation":"

The name of the output from the flow input node that begins the prompt flow.

" + } + }, + "documentation":"

Contains information about an input into the prompt flow and where to send it.

This data type is used in the following API operations:

" + }, + "FlowInputContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

The input to send to the prompt flow input node.

" + } + }, + "documentation":"

Contains information about an input into the flow.

This data type is used in the following API operations:

", + "sensitive":true, + "union":true + }, + "FlowInputs":{ + "type":"list", + "member":{"shape":"FlowInput"}, + "max":1, + "min":1 + }, + "FlowOutputContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

The content in the output.

" + } + }, + "documentation":"

Contains information about the content in an output from prompt flow invocation.

This data type is used in the following API operations:

", + "union":true + }, + "FlowOutputEvent":{ + "type":"structure", + "required":[ + "content", + "nodeName", + "nodeType" + ], + "members":{ + "content":{ + "shape":"FlowOutputContent", + "documentation":"

The content in the output.

" + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

The name of the flow output node that the output is from.

" + }, + "nodeType":{ + "shape":"NodeType", + "documentation":"

The type of the node that the output is from.

" + } + }, + "documentation":"

Contains information about an output from prompt flow invocation.

This data type is used in the following API operations:

", + "event":true, + "sensitive":true + }, + "FlowResponseStream":{ + "type":"structure", + "members":{ + "accessDeniedException":{ + "shape":"AccessDeniedException", + "documentation":"

The request is denied because of missing access permissions. Check your permissions and retry your request.

" + }, + "badGatewayException":{ + "shape":"BadGatewayException", + "documentation":"

There was an issue with a dependency due to a server issue. Retry your request.

" + }, + "conflictException":{ + "shape":"ConflictException", + "documentation":"

There was a conflict performing an operation. Resolve the conflict and retry your request.

" + }, + "dependencyFailedException":{ + "shape":"DependencyFailedException", + "documentation":"

There was an issue with a dependency. Check the resource configurations and retry the request.

" + }, + "flowCompletionEvent":{ + "shape":"FlowCompletionEvent", + "documentation":"

Contains information about why the flow completed.

" + }, + "flowOutputEvent":{ + "shape":"FlowOutputEvent", + "documentation":"

Contains information about an output from flow invocation.

" + }, + "internalServerException":{ + "shape":"InternalServerException", + "documentation":"

An internal server error occurred. Retry your request.

" + }, + "resourceNotFoundException":{ + "shape":"ResourceNotFoundException", + "documentation":"

The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again.

" + }, + "serviceQuotaExceededException":{ + "shape":"ServiceQuotaExceededException", + "documentation":"

The number of requests exceeds the service quota. Resubmit your request later.

" + }, + "throttlingException":{ + "shape":"ThrottlingException", + "documentation":"

The number of requests exceeds the limit. Resubmit your request later.

" + }, + "validationException":{ + "shape":"ValidationException", + "documentation":"

Input validation failed. Check your request parameters and retry the request.

" + } + }, + "documentation":"

The output of the flow.

This data type is used in the following API operations:

", + "eventstream":true + }, "Function":{ "type":"string", "sensitive":true @@ -648,44 +1038,440 @@ "documentation":"

Contains the template for the prompt that's sent to the model for response generation.

" } }, - "documentation":"

Contains configurations for response generation based on the knowledge base query results.

This data type is used in the following API operations:

" + "documentation":"

Contains configurations for response generation based on the knowledge base query results.

This data type is used in the following API operations:

" + }, + "GetAgentMemoryRequest":{ + "type":"structure", + "required":[ + "agentAliasId", + "agentId", + "memoryId", + "memoryType" + ], + "members":{ + "agentAliasId":{ + "shape":"AgentAliasId", + "documentation":"

The unique identifier of an alias of an agent.

", + "location":"uri", + "locationName":"agentAliasId" + }, + "agentId":{ + "shape":"AgentId", + "documentation":"

The unique identifier of the agent to which the alias belongs.

", + "location":"uri", + "locationName":"agentId" + }, + "maxItems":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxItems" + }, + "memoryId":{ + "shape":"MemoryId", + "documentation":"

The unique identifier of the memory.

", + "location":"querystring", + "locationName":"memoryId" + }, + "memoryType":{ + "shape":"MemoryType", + "documentation":"

The type of memory.

", + "location":"querystring", + "locationName":"memoryType" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxItems value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetAgentMemoryResponse":{ + "type":"structure", + "members":{ + "memoryContents":{ + "shape":"Memories", + "documentation":"

Contains details of the sessions stored in the memory.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxItems value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "GuadrailAction":{ + "type":"string", + "enum":[ + "INTERVENED", + "NONE" + ] + }, + "GuardrailAction":{ + "type":"string", + "enum":[ + "INTERVENED", + "NONE" + ] + }, + "GuardrailAssessment":{ + "type":"structure", + "members":{ + "contentPolicy":{ + "shape":"GuardrailContentPolicyAssessment", + "documentation":"

Content policy details of the Guardrail.

" + }, + "sensitiveInformationPolicy":{ + "shape":"GuardrailSensitiveInformationPolicyAssessment", + "documentation":"

Sensitive Information policy details of Guardrail.

" + }, + "topicPolicy":{ + "shape":"GuardrailTopicPolicyAssessment", + "documentation":"

Topic policy details of the Guardrail.

" + }, + "wordPolicy":{ + "shape":"GuardrailWordPolicyAssessment", + "documentation":"

Word policy details of the Guardrail.

" + } + }, + "documentation":"

Assessment details of the content analyzed by Guardrails.

", + "sensitive":true + }, + "GuardrailAssessmentList":{ + "type":"list", + "member":{"shape":"GuardrailAssessment"} + }, + "GuardrailConfiguration":{ + "type":"structure", + "required":[ + "guardrailId", + "guardrailVersion" + ], + "members":{ + "guardrailId":{ + "shape":"GuardrailConfigurationGuardrailIdString", + "documentation":"

The unique identifier for the guardrail.

" + }, + "guardrailVersion":{ + "shape":"GuardrailConfigurationGuardrailVersionString", + "documentation":"

The version of the guardrail.

" + } + }, + "documentation":"

The configuration details for the guardrail.

" + }, + "GuardrailConfigurationGuardrailIdString":{ + "type":"string", + "max":64, + "min":0, + "pattern":"^[a-z0-9]+$" + }, + "GuardrailConfigurationGuardrailVersionString":{ + "type":"string", + "max":5, + "min":1, + "pattern":"^(([1-9][0-9]{0,7})|(DRAFT))$" + }, + "GuardrailContentFilter":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailContentPolicyAction", + "documentation":"

The action placed on the content by the Guardrail filter.

" + }, + "confidence":{ + "shape":"GuardrailContentFilterConfidence", + "documentation":"

The confidence level regarding the content detected in the filter by the Guardrail.

" + }, + "type":{ + "shape":"GuardrailContentFilterType", + "documentation":"

The type of content detected in the filter by the Guardrail.

" + } + }, + "documentation":"

Details of the content filter used in the Guardrail.

", + "sensitive":true + }, + "GuardrailContentFilterConfidence":{ + "type":"string", + "enum":[ + "NONE", + "LOW", + "MEDIUM", + "HIGH" + ] + }, + "GuardrailContentFilterList":{ + "type":"list", + "member":{"shape":"GuardrailContentFilter"}, + "sensitive":true + }, + "GuardrailContentFilterType":{ + "type":"string", + "enum":[ + "INSULTS", + "HATE", + "SEXUAL", + "VIOLENCE", + "MISCONDUCT", + "PROMPT_ATTACK" + ] + }, + "GuardrailContentPolicyAction":{ + "type":"string", + "enum":["BLOCKED"] + }, + "GuardrailContentPolicyAssessment":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"GuardrailContentFilterList", + "documentation":"

The filter details of the policy assessment used in the Guardrails filter.

" + } + }, + "documentation":"

The details of the policy assessment in the Guardrails filter.

", + "sensitive":true + }, + "GuardrailCustomWord":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailWordPolicyAction", + "documentation":"

The action details for the custom word filter in the Guardrail.

" + }, + "match":{ + "shape":"String", + "documentation":"

The match details for the custom word filter in the Guardrail.

" + } + }, + "documentation":"

The custom word details for the filter in the Guardrail.

", + "sensitive":true + }, + "GuardrailCustomWordList":{ + "type":"list", + "member":{"shape":"GuardrailCustomWord"}, + "sensitive":true + }, + "GuardrailManagedWord":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailWordPolicyAction", + "documentation":"

The action details for the managed word filter in the Guardrail.

" + }, + "match":{ + "shape":"String", + "documentation":"

The match details for the managed word filter in the Guardrail.

" + }, + "type":{ + "shape":"GuardrailManagedWordType", + "documentation":"

The type details for the managed word filter in the Guardrail.

" + } + }, + "documentation":"

The managed word details for the filter in the Guardrail.

", + "sensitive":true + }, + "GuardrailManagedWordList":{ + "type":"list", + "member":{"shape":"GuardrailManagedWord"}, + "sensitive":true + }, + "GuardrailManagedWordType":{ + "type":"string", + "enum":["PROFANITY"] + }, + "GuardrailPiiEntityFilter":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailSensitiveInformationPolicyAction", + "documentation":"

The action of the Guardrail filter to identify and remove PII.

" + }, + "match":{ + "shape":"String", + "documentation":"

The match to settings in the Guardrail filter to identify and remove PII.

" + }, + "type":{ + "shape":"GuardrailPiiEntityType", + "documentation":"

The type of PII the Guardrail filter has identified and removed.

" + } + }, + "documentation":"

The Guardrail filter to identify and remove personally identifiable information (PII).

", + "sensitive":true + }, + "GuardrailPiiEntityFilterList":{ + "type":"list", + "member":{"shape":"GuardrailPiiEntityFilter"}, + "sensitive":true + }, + "GuardrailPiiEntityType":{ + "type":"string", + "enum":[ + "ADDRESS", + "AGE", + "AWS_ACCESS_KEY", + "AWS_SECRET_KEY", + "CA_HEALTH_NUMBER", + "CA_SOCIAL_INSURANCE_NUMBER", + "CREDIT_DEBIT_CARD_CVV", + "CREDIT_DEBIT_CARD_EXPIRY", + "CREDIT_DEBIT_CARD_NUMBER", + "DRIVER_ID", + "EMAIL", + "INTERNATIONAL_BANK_ACCOUNT_NUMBER", + "IP_ADDRESS", + "LICENSE_PLATE", + "MAC_ADDRESS", + "NAME", + "PASSWORD", + "PHONE", + "PIN", + "SWIFT_CODE", + "UK_NATIONAL_HEALTH_SERVICE_NUMBER", + "UK_NATIONAL_INSURANCE_NUMBER", + "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER", + "URL", + "USERNAME", + "US_BANK_ACCOUNT_NUMBER", + "US_BANK_ROUTING_NUMBER", + "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER", + "US_PASSPORT_NUMBER", + "US_SOCIAL_SECURITY_NUMBER", + "VEHICLE_IDENTIFICATION_NUMBER" + ] + }, + "GuardrailRegexFilter":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailSensitiveInformationPolicyAction", + "documentation":"

The action details for the regex filter used in the Guardrail.

" + }, + "match":{ + "shape":"String", + "documentation":"

The match details for the regex filter used in the Guardrail.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name details for the regex filter used in the Guardrail.

" + }, + "regex":{ + "shape":"String", + "documentation":"

The regex details for the regex filter used in the Guardrail.

" + } + }, + "documentation":"

The details for the regex filter used in the Guardrail.

", + "sensitive":true + }, + "GuardrailRegexFilterList":{ + "type":"list", + "member":{"shape":"GuardrailRegexFilter"}, + "sensitive":true + }, + "GuardrailSensitiveInformationPolicyAction":{ + "type":"string", + "enum":[ + "BLOCKED", + "ANONYMIZED" + ] + }, + "GuardrailSensitiveInformationPolicyAssessment":{ + "type":"structure", + "members":{ + "piiEntities":{ + "shape":"GuardrailPiiEntityFilterList", + "documentation":"

The details of the PII entities used in the sensitive policy assessment for the Guardrail.

" + }, + "regexes":{ + "shape":"GuardrailRegexFilterList", + "documentation":"

The details of the regexes used in the sensitive policy assessment for the Guardrail.

" + } + }, + "documentation":"

The details of the sensitive policy assessment used in the Guardrail.

", + "sensitive":true + }, + "GuardrailTopic":{ + "type":"structure", + "members":{ + "action":{ + "shape":"GuardrailTopicPolicyAction", + "documentation":"

The action details on a specific topic in the Guardrail.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name details on a specific topic in the Guardrail.

" + }, + "type":{ + "shape":"GuardrailTopicType", + "documentation":"

The type details on a specific topic in the Guardrail.

" + } + }, + "documentation":"

The details for a specific topic defined in the Guardrail.

", + "sensitive":true + }, + "GuardrailTopicList":{ + "type":"list", + "member":{"shape":"GuardrailTopic"}, + "sensitive":true + }, + "GuardrailTopicPolicyAction":{ + "type":"string", + "enum":["BLOCKED"] + }, + "GuardrailTopicPolicyAssessment":{ + "type":"structure", + "members":{ + "topics":{ + "shape":"GuardrailTopicList", + "documentation":"

The topic details of the policy assessment used in the Guardrail.

" + } + }, + "documentation":"

The details of the policy assessment used in the Guardrail.

", + "sensitive":true }, - "GuadrailAction":{ + "GuardrailTopicType":{ "type":"string", - "enum":[ - "INTERVENED", - "NONE" - ] + "enum":["DENY"] }, - "GuardrailConfiguration":{ + "GuardrailTrace":{ "type":"structure", - "required":[ - "guardrailId", - "guardrailVersion" - ], "members":{ - "guardrailId":{ - "shape":"GuardrailConfigurationGuardrailIdString", - "documentation":"

The unique identifier for the guardrail.

" + "action":{ + "shape":"GuardrailAction", + "documentation":"

The trace action details used with the Guardrail.

" }, - "guardrailVersion":{ - "shape":"GuardrailConfigurationGuardrailVersionString", - "documentation":"

The version of the guardrail.

" + "inputAssessments":{ + "shape":"GuardrailAssessmentList", + "documentation":"

The details of the input assessments used in the Guardrail Trace.

" + }, + "outputAssessments":{ + "shape":"GuardrailAssessmentList", + "documentation":"

The details of the output assessments used in the Guardrail Trace.

" + }, + "traceId":{ + "shape":"TraceId", + "documentation":"

The details of the trace Id used in the Guardrail Trace.

" } }, - "documentation":"

The configuration details for the guardrail.

" + "documentation":"

The trace details used in the Guardrail.

", + "sensitive":true }, - "GuardrailConfigurationGuardrailIdString":{ + "GuardrailWordPolicyAction":{ "type":"string", - "max":64, - "min":0, - "pattern":"^[a-z0-9]+$" + "enum":["BLOCKED"] }, - "GuardrailConfigurationGuardrailVersionString":{ - "type":"string", - "max":5, - "min":1, - "pattern":"^(([1-9][0-9]{0,7})|(DRAFT))$" + "GuardrailWordPolicyAssessment":{ + "type":"structure", + "members":{ + "customWords":{ + "shape":"GuardrailCustomWordList", + "documentation":"

The custom word details for words defined in the Guardrail filter.

" + }, + "managedWordLists":{ + "shape":"GuardrailManagedWordList", + "documentation":"

The managed word lists for words defined in the Guardrail filter.

" + } + }, + "documentation":"

The assessment details for words defined in the Guardrail filter.

", + "sensitive":true }, "Identifier":{ "type":"string", @@ -724,11 +1510,38 @@ }, "topP":{ "shape":"TopP", - "documentation":"

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" + "documentation":"

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 0.8, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" } }, "documentation":"

Specifications about the inference parameters that were provided alongside the prompt. These are specified in the PromptOverrideConfiguration object that was set when the agent was created or updated. For more information, see Inference parameters for foundation models.

" }, + "InputFile":{ + "type":"structure", + "required":[ + "name", + "source", + "useCase" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the source file.

" + }, + "source":{ + "shape":"FileSource", + "documentation":"

Specifies where the files are located.

" + }, + "useCase":{ + "shape":"FileUseCase", + "documentation":"

Specifies how the source files will be used by the code interpreter.

" + } + }, + "documentation":"

Contains details of the source files.

" + }, + "InputFiles":{ + "type":"list", + "member":{"shape":"InputFile"} + }, "InputText":{ "type":"string", "max":25000000, @@ -756,6 +1569,10 @@ "shape":"ActionGroupInvocationInput", "documentation":"

Contains information about the action group to be invoked.

" }, + "codeInterpreterInvocationInput":{ + "shape":"CodeInterpreterInvocationInput", + "documentation":"

Contains information about the code interpreter to be invoked.

" + }, "invocationType":{ "shape":"InvocationType", "documentation":"

Specifies whether the agent is invoking an action group or a knowledge base.

" @@ -813,7 +1630,8 @@ "enum":[ "ACTION_GROUP", "KNOWLEDGE_BASE", - "FINISH" + "FINISH", + "ACTION_GROUP_CODE_INTERPRETER" ] }, "InvokeAgentRequest":{ @@ -848,6 +1666,10 @@ "shape":"InputText", "documentation":"

The prompt text to send the agent.

If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored.

" }, + "memoryId":{ + "shape":"MemoryId", + "documentation":"

The unique identifier of the agent memory.

" + }, "sessionId":{ "shape":"SessionId", "documentation":"

The unique identifier of the session. Use the same value across requests to continue the same conversation.

", @@ -878,6 +1700,12 @@ "location":"header", "locationName":"x-amzn-bedrock-agent-content-type" }, + "memoryId":{ + "shape":"MemoryId", + "documentation":"

The unique identifier of the agent memory.

", + "location":"header", + "locationName":"x-amz-bedrock-agent-memory-id" + }, "sessionId":{ "shape":"SessionId", "documentation":"

The unique identifier of the session with the agent.

", @@ -887,12 +1715,72 @@ }, "payload":"completion" }, + "InvokeFlowRequest":{ + "type":"structure", + "required":[ + "flowAliasIdentifier", + "flowIdentifier", + "inputs" + ], + "members":{ + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

The unique identifier of the flow alias.

", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "inputs":{ + "shape":"FlowInputs", + "documentation":"

A list of objects, each containing information about an input into the flow.

" + } + } + }, + "InvokeFlowResponse":{ + "type":"structure", + "required":["responseStream"], + "members":{ + "responseStream":{ + "shape":"FlowResponseStream", + "documentation":"

The output of the flow, returned as a stream. If there's an error, the error is returned.

" + } + }, + "payload":"responseStream" + }, "KmsKeyArn":{ "type":"string", "max":2048, "min":1, "pattern":"^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" }, + "KnowledgeBaseConfiguration":{ + "type":"structure", + "required":[ + "knowledgeBaseId", + "retrievalConfiguration" + ], + "members":{ + "knowledgeBaseId":{ + "shape":"KnowledgeBaseId", + "documentation":"

The unique identifier for a knowledge base attached to the agent.

" + }, + "retrievalConfiguration":{ + "shape":"KnowledgeBaseRetrievalConfiguration", + "documentation":"

The configurations to apply to the knowledge base during query. For more information, see Query configurations.

" + } + }, + "documentation":"

Configurations to apply to a knowledge base attached to the agent during query. For more information, see Knowledge base retrieval configurations.

" + }, + "KnowledgeBaseConfigurations":{ + "type":"list", + "member":{"shape":"KnowledgeBaseConfiguration"}, + "min":1 + }, "KnowledgeBaseId":{ "type":"string", "max":10, @@ -953,7 +1841,7 @@ "documentation":"

Contains details about how the results from the vector search should be returned. For more information, see Query configurations.

" } }, - "documentation":"

Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.

This data type is used in the following API operations:

" + "documentation":"

Contains configurations for knowledge base query. For more information, see Query configurations.

This data type is used in the following API operations:

" }, "KnowledgeBaseRetrievalResult":{ "type":"structure", @@ -992,7 +1880,7 @@ "members":{ "generationConfiguration":{ "shape":"GenerationConfiguration", - "documentation":"

Contains configurations for response generation based on the knowwledge base query results.

" + "documentation":"

Contains configurations for response generation based on the knowledge base query results.

" }, "knowledgeBaseId":{ "shape":"KnowledgeBaseId", @@ -1002,6 +1890,10 @@ "shape":"BedrockModelArn", "documentation":"

The ARN of the foundation model used to generate a response.

" }, + "orchestrationConfiguration":{ + "shape":"OrchestrationConfiguration", + "documentation":"

Settings for how the model processes the prompt prior to retrieval and generation.

" + }, "retrievalConfiguration":{ "shape":"KnowledgeBaseRetrievalConfiguration", "documentation":"

Contains configurations for how to retrieve and return the knowledge base query.

" @@ -1035,6 +1927,13 @@ "min":1 }, "LambdaArn":{"type":"string"}, + "MaxResults":{ + "type":"integer", + "documentation":"

Max Results.

", + "box":true, + "max":1000, + "min":1 + }, "MaxTokens":{ "type":"integer", "box":true, @@ -1047,6 +1946,68 @@ "max":4096, "min":0 }, + "Memories":{ + "type":"list", + "member":{"shape":"Memory"} + }, + "Memory":{ + "type":"structure", + "members":{ + "sessionSummary":{ + "shape":"MemorySessionSummary", + "documentation":"

Contains summary of a session.

" + } + }, + "documentation":"

Contains sessions summaries.

", + "union":true + }, + "MemoryId":{ + "type":"string", + "max":100, + "min":2, + "pattern":"^[0-9a-zA-Z._:-]+$" + }, + "MemorySessionSummary":{ + "type":"structure", + "members":{ + "memoryId":{ + "shape":"MemoryId", + "documentation":"

The unique identifier of the memory where the session summary is stored.

" + }, + "sessionExpiryTime":{ + "shape":"DateTimestamp", + "documentation":"

The time when the memory duration for the session is set to end.

" + }, + "sessionId":{ + "shape":"SessionId", + "documentation":"

The identifier for this session.

" + }, + "sessionStartTime":{ + "shape":"DateTimestamp", + "documentation":"

The start time for this session.

" + }, + "summaryText":{ + "shape":"SummaryText", + "documentation":"

The summarized text for this session.

" + } + }, + "documentation":"

Contains details of a session summary.

" + }, + "MemoryType":{ + "type":"string", + "enum":["SESSION_SUMMARY"] + }, + "Metadata":{ + "type":"structure", + "members":{ + "usage":{ + "shape":"Usage", + "documentation":"

Contains details of the foundation model usage.

" + } + }, + "documentation":"

Provides details of the foundation model.

", + "sensitive":true + }, "MimeType":{"type":"string"}, "ModelInvocationInput":{ "type":"structure", @@ -1089,6 +2050,26 @@ "min":1, "pattern":"^\\S*$" }, + "NodeName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" + }, + "NodeOutputName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" + }, + "NodeType":{ + "type":"string", + "enum":[ + "FlowInputNode", + "FlowOutputNode", + "LambdaFunctionNode", + "KnowledgeBaseNode", + "PromptNode", + "ConditionNode", + "LexNode" + ] + }, "NonBlankString":{ "type":"string", "pattern":"^[\\s\\S]*$" @@ -1100,6 +2081,10 @@ "shape":"ActionGroupInvocationOutput", "documentation":"

Contains the JSON-formatted string returned by the API invoked by the action group.

" }, + "codeInterpreterInvocationOutput":{ + "shape":"CodeInterpreterInvocationOutput", + "documentation":"

Contains the JSON-formatted string returned by the API invoked by the code interpreter.

" + }, "finalResponse":{ "shape":"FinalResponse", "documentation":"

Contains details about the response to the user.

" @@ -1124,6 +2109,36 @@ "documentation":"

Contains the result or output of an action group or knowledge base, or the response to the user.

", "sensitive":true }, + "OrchestrationConfiguration":{ + "type":"structure", + "required":["queryTransformationConfiguration"], + "members":{ + "queryTransformationConfiguration":{ + "shape":"QueryTransformationConfiguration", + "documentation":"

To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION.

" + } + }, + "documentation":"

Settings for how the model processes the prompt prior to retrieval and generation.

" + }, + "OrchestrationModelInvocationOutput":{ + "type":"structure", + "members":{ + "metadata":{ + "shape":"Metadata", + "documentation":"

Contains information about the foundation model output.

" + }, + "rawResponse":{ + "shape":"RawResponse", + "documentation":"

Contains details of the raw response from the foundation model output.

" + }, + "traceId":{ + "shape":"TraceId", + "documentation":"

The unique identifier of the trace.

" + } + }, + "documentation":"

The foundation model output from the orchestration step.

", + "sensitive":true + }, "OrchestrationTrace":{ "type":"structure", "members":{ @@ -1135,6 +2150,10 @@ "shape":"ModelInvocationInput", "documentation":"

The input for the orchestration step.

  • The type is ORCHESTRATION.

  • The text contains the prompt.

  • The inferenceConfiguration, parserMode, and overrideLambda values are set in the PromptOverrideConfiguration object that was set when the agent was created or updated.

" }, + "modelInvocationOutput":{ + "shape":"OrchestrationModelInvocationOutput", + "documentation":"

Contains information pertaining to the output from the foundation model that is being invoked.

" + }, "observation":{ "shape":"Observation", "documentation":"

Details about the observation (the output of the action group Lambda or knowledge base) made by the agent.

" @@ -1148,6 +2167,31 @@ "sensitive":true, "union":true }, + "OutputFile":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"FileBody", + "documentation":"

The byte count of files that contains response from code interpreter.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the file containing response from code interpreter.

" + }, + "type":{ + "shape":"MimeType", + "documentation":"

The type of file that contains response from the code interpreter.

" + } + }, + "documentation":"

Contains details of the response from code interpreter.

", + "sensitive":true + }, + "OutputFiles":{ + "type":"list", + "member":{"shape":"OutputFile"}, + "max":5, + "min":0 + }, "OutputString":{ "type":"string", "sensitive":true @@ -1326,6 +2370,21 @@ }, "documentation":"

Contains the parameters in the request body.

" }, + "QueryTransformationConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"QueryTransformationType", + "documentation":"

The type of transformation to apply to the prompt.

" + } + }, + "documentation":"

To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION.

" + }, + "QueryTransformationType":{ + "type":"string", + "enum":["QUERY_DECOMPOSITION"] + }, "RAGStopSequences":{ "type":"list", "member":{"shape":"RAGStopSequencesMemberString"}, @@ -1356,6 +2415,17 @@ "type":"string", "sensitive":true }, + "RawResponse":{ + "type":"structure", + "members":{ + "content":{ + "shape":"String", + "documentation":"

The foundation model's raw output content.

" + } + }, + "documentation":"

Contains the raw output from the foundation model.

", + "sensitive":true + }, "RepromptResponse":{ "type":"structure", "members":{ @@ -1428,6 +2498,10 @@ "shape":"DependencyFailedException", "documentation":"

There was an issue with a dependency. Check the resource configurations and retry the request.

" }, + "files":{ + "shape":"FilePart", + "documentation":"

Contains intermediate response for code interpreter if any files have been generated.

" + }, "internalServerException":{ "shape":"InternalServerException", "documentation":"

An internal server error occurred. Retry your request.

" @@ -1465,50 +2539,58 @@ "members":{ "andAll":{ "shape":"RetrievalFilterList", - "documentation":"

Knowledge base data sources whose metadata attributes fulfill all the filter conditions inside this list are returned.

" + "documentation":"

Knowledge base data sources are returned if their metadata attributes fulfill all the filter conditions inside this list.

" }, "equals":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value matches the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value matches the value in this object.

The following example would return data sources with an animal attribute whose value is cat:

\"equals\": { \"key\": \"animal\", \"value\": \"cat\" }

" }, "greaterThan":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than the value in this object.

The following example would return data sources with an year attribute whose value is greater than 1989:

\"greaterThan\": { \"key\": \"year\", \"value\": 1989 }

" }, "greaterThanOrEquals":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object.

The following example would return data sources with an year attribute whose value is greater than or equal to 1989:

\"greaterThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }

" }, "in":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object.

The following example would return data sources with an animal attribute that is either cat or dog:

\"in\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }

" }, "lessThan":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than the value in this object.

The following example would return data sources with a year attribute whose value is less than 1989.

\"lessThan\": { \"key\": \"year\", \"value\": 1989 }

" }, "lessThanOrEquals":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object.

The following example would return data sources with a year attribute whose value is less than or equal to 1989.

\"lessThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }

" + }, + "listContains":{ + "shape":"FilterAttribute", + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is a list that contains the value as one of its members.

The following example would return data sources with an animals attribute that is a list containing a cat member (for example [\"dog\", \"cat\"]).

\"listContains\": { \"key\": \"animals\", \"value\": \"cat\" }

" }, "notEquals":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned.

" + "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned.

The following example would return data sources that don't contain an animal attribute whose value is cat.

\"notEquals\": { \"key\": \"animal\", \"value\": \"cat\" }

" }, "notIn":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object are returned.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object.

The following example would return data sources whose animal attribute is neither cat nor dog.

\"notIn\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }

" }, "orAll":{ "shape":"RetrievalFilterList", - "documentation":"

Knowledge base data sources whose metadata attributes fulfill at least one of the filter conditions inside this list are returned.

" + "documentation":"

Knowledge base data sources are returned if their metadata attributes fulfill at least one of the filter conditions inside this list.

" }, "startsWith":{ "shape":"FilterAttribute", - "documentation":"

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value starts with the value in this object are returned. This filter is currently only supported for Amazon OpenSearch Serverless vector stores.

" + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value starts with the value in this object. This filter is currently only supported for Amazon OpenSearch Serverless vector stores.

The following example would return data sources with an animal attribute that starts with ca (for example, cat or camel).

\"startsWith\": { \"key\": \"animal\", \"value\": \"ca\" }

" + }, + "stringContains":{ + "shape":"FilterAttribute", + "documentation":"

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is one of the following:

  • A string that contains the value as a substring. The following example would return data sources with an animal attribute that contains the substring at (for example cat).

    \"stringContains\": { \"key\": \"animal\", \"value\": \"at\" }

  • A list with a member that contains the value as a substring. The following example would return data sources with an animals attribute that is a list containing a member that contains the substring at (for example [\"dog\", \"cat\"]).

    \"stringContains\": { \"key\": \"animals\", \"value\": \"at\" }

" } }, - "documentation":"

Specifies the filters to use on the metadata attributes in the knowledge base data sources before returning results. For more information, see Query configurations.

This data type is used in the following API operations:

", + "documentation":"

Specifies the filters to use on the metadata attributes in the knowledge base data sources before returning results. For more information, see Query configurations. See the examples below to see how to use these filters.

This data type is used in the following API operations:

", "sensitive":true, "union":true }, @@ -1518,6 +2600,16 @@ "max":5, "min":2 }, + "RetrievalResultConfluenceLocation":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

The Confluence host URL for the data source location.

" + } + }, + "documentation":"

The Confluence data source location.

" + }, "RetrievalResultContent":{ "type":"structure", "required":["text"], @@ -1534,21 +2626,43 @@ "type":"structure", "required":["type"], "members":{ + "confluenceLocation":{ + "shape":"RetrievalResultConfluenceLocation", + "documentation":"

The Confluence data source location.

" + }, "s3Location":{ "shape":"RetrievalResultS3Location", - "documentation":"

Contains the S3 location of the data source.

" + "documentation":"

The S3 data source location.

" + }, + "salesforceLocation":{ + "shape":"RetrievalResultSalesforceLocation", + "documentation":"

The Salesforce data source location.

" + }, + "sharePointLocation":{ + "shape":"RetrievalResultSharePointLocation", + "documentation":"

The SharePoint data source location.

" }, "type":{ "shape":"RetrievalResultLocationType", - "documentation":"

The type of the location of the data source.

" + "documentation":"

The type of data source location.

" + }, + "webLocation":{ + "shape":"RetrievalResultWebLocation", + "documentation":"

The web URL/URLs data source location.

" } }, - "documentation":"

Contains information about the location of the data source.

This data type is used in the following API operations:

", + "documentation":"

Contains information about the data source location.

This data type is used in the following API operations:

", "sensitive":true }, "RetrievalResultLocationType":{ "type":"string", - "enum":["S3"] + "enum":[ + "S3", + "WEB", + "CONFLUENCE", + "SALESFORCE", + "SHAREPOINT" + ] }, "RetrievalResultMetadata":{ "type":"map", @@ -1573,10 +2687,40 @@ "members":{ "uri":{ "shape":"String", - "documentation":"

The S3 URI of the data source.

" + "documentation":"

The S3 URI for the data source location.

" + } + }, + "documentation":"

The S3 data source location.

This data type is used in the following API operations:

" + }, + "RetrievalResultSalesforceLocation":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

The Salesforce host URL for the data source location.

" + } + }, + "documentation":"

The Salesforce data source location.

" + }, + "RetrievalResultSharePointLocation":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

The SharePoint site URL for the data source location.

" + } + }, + "documentation":"

The SharePoint data source location.

" + }, + "RetrievalResultWebLocation":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

The web URL/URLs for the data source location.

" } }, - "documentation":"

Contains the S3 location of the data source.

This data type is used in the following API operations:

" + "documentation":"

The web URL/URLs data source location.

" }, "RetrieveAndGenerateConfiguration":{ "type":"structure", @@ -1644,7 +2788,7 @@ }, "sessionId":{ "shape":"SessionId", - "documentation":"

The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base.

" + "documentation":"

The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself.

" } } }, @@ -1669,7 +2813,7 @@ }, "sessionId":{ "shape":"SessionId", - "documentation":"

The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base.

" + "documentation":"

The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself.

" } } }, @@ -1787,6 +2931,17 @@ }, "documentation":"

The unique wrapper object of the document from the S3 location.

" }, + "S3ObjectFile":{ + "type":"structure", + "required":["uri"], + "members":{ + "uri":{ + "shape":"S3Uri", + "documentation":"

The URI of the S3 object.

" + } + }, + "documentation":"

Contains details of the S3 object where the source file is located.

" + }, "S3Uri":{ "type":"string", "max":1024, @@ -1826,10 +2981,18 @@ "SessionState":{ "type":"structure", "members":{ + "files":{ + "shape":"InputFiles", + "documentation":"

Contains information about the files used by code interpreter.

" + }, "invocationId":{ "shape":"String", "documentation":"

The identifier of the invocation of an action. This value must match the invocationId returned in the InvokeAgent response for the action whose results are provided in the returnControlInvocationResults field. For more information, see Return control to the agent developer and Control session context.

" }, + "knowledgeBaseConfigurations":{ + "shape":"KnowledgeBaseConfigurations", + "documentation":"

An array of configurations, each of which applies to a knowledge base attached to the agent.

" + }, "promptSessionAttributes":{ "shape":"PromptSessionAttributesMap", "documentation":"

Contains attributes that persist across a prompt and the values of those attributes. These attributes replace the $prompt_session_attributes$ placeholder variable in the orchestration prompt template. For more information, see Prompt template placeholder variables.

" @@ -1885,6 +3048,11 @@ "min":0 }, "String":{"type":"string"}, + "SummaryText":{ + "type":"string", + "max":25000000, + "min":0 + }, "Temperature":{ "type":"float", "box":true, @@ -1965,6 +3133,10 @@ "shape":"FailureTrace", "documentation":"

Contains information about the failure of the interaction.

" }, + "guardrailTrace":{ + "shape":"GuardrailTrace", + "documentation":"

The trace details for a trace defined in the Guardrail filter.

" + }, "orchestrationTrace":{ "shape":"OrchestrationTrace", "documentation":"

Details about the orchestration step, in which the agent determines the order in which actions are executed and which knowledge bases are retrieved.

" @@ -2029,6 +3201,21 @@ "REPROMPT" ] }, + "Usage":{ + "type":"structure", + "members":{ + "inputTokens":{ + "shape":"Integer", + "documentation":"

Contains information about the input tokens from the foundation model usage.

" + }, + "outputTokens":{ + "shape":"Integer", + "documentation":"

Contains information about the output tokens from the foundation model usage.

" + } + }, + "documentation":"

Contains information of the usage of the foundation model.

", + "sensitive":true + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/botocore/data/bedrock-agent/2023-06-05/paginators-1.json b/botocore/data/bedrock-agent/2023-06-05/paginators-1.json index 8e00fb72a9..8485d731a1 100644 --- a/botocore/data/bedrock-agent/2023-06-05/paginators-1.json +++ b/botocore/data/bedrock-agent/2023-06-05/paginators-1.json @@ -47,6 +47,30 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "knowledgeBaseSummaries" + }, + "ListFlowAliases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowAliasSummaries" + }, + "ListFlowVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowVersionSummaries" + }, + "ListFlows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowSummaries" + }, + "ListPrompts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "promptSummaries" } } } diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index 1b06e63592..438e3339c9 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -50,7 +50,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers.

  • Specify the following fields for security purposes.

    • agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent.

    • (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.

    • (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session.

  • To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts.

  • If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot.

", + "documentation":"

Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers.

  • Specify the following fields for security purposes.

    • agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent.

    • (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.

    • (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session.

  • To enable your agent to retain conversational context across multiple sessions, include a memoryConfiguration object. For more information, see Configure memory.

  • To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts.

  • If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot.

", "idempotent":true }, "CreateAgentActionGroup":{ @@ -71,7 +71,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them.

To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

", + "documentation":"

Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them.

To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.UserInput.

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.CodeInterpreter.

You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

", "idempotent":true }, "CreateAgentAlias":{ @@ -113,7 +113,69 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Sets up a data source to be added to a knowledge base.

You can't change the chunkingConfiguration after you create the data source.

", + "documentation":"

Creates a data source connector for a knowledge base.

You can't change the chunkingConfiguration after you create the data source connector.

", + "idempotent":true + }, + "CreateFlow":{ + "name":"CreateFlow", + "http":{ + "method":"POST", + "requestUri":"/flows/", + "responseCode":201 + }, + "input":{"shape":"CreateFlowRequest"}, + "output":{"shape":"CreateFlowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a prompt flow that you can use to send an input through various steps to yield an output. Configure nodes, each of which corresponds to a step of the flow, and create connections between the nodes to create paths to different outputs. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "idempotent":true + }, + "CreateFlowAlias":{ + "name":"CreateFlowAlias", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/aliases", + "responseCode":201 + }, + "input":{"shape":"CreateFlowAliasRequest"}, + "output":{"shape":"CreateFlowAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates an alias of a flow for deployment. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "idempotent":true + }, + "CreateFlowVersion":{ + "name":"CreateFlowVersion", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/versions", + "responseCode":201 + }, + "input":{"shape":"CreateFlowVersionRequest"}, + "output":{"shape":"CreateFlowVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a version of the flow that you can deploy. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", "idempotent":true }, "CreateKnowledgeBase":{ @@ -136,6 +198,47 @@ "documentation":"

Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion.

If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base.

  • Provide the name and an optional description.

  • Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field.

  • Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object.

  • Provide the configuration for your vector store in the storageConfiguration object.

", "idempotent":true }, + "CreatePrompt":{ + "name":"CreatePrompt", + "http":{ + "method":"POST", + "requestUri":"/prompts/", + "responseCode":201 + }, + "input":{"shape":"CreatePromptRequest"}, + "output":{"shape":"CreatePromptResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a prompt in your prompt library that you can add to a flow. For more information, see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "idempotent":true + }, + "CreatePromptVersion":{ + "name":"CreatePromptVersion", + "http":{ + "method":"POST", + "requestUri":"/prompts/{promptIdentifier}/versions", + "responseCode":201 + }, + "input":{"shape":"CreatePromptVersionRequest"}, + "output":{"shape":"CreatePromptVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a static snapshot of your prompt that can be deployed to production. For more information, see Deploy prompts using Prompt management by creating versions in the Amazon Bedrock User Guide.

", + "idempotent":true + }, "DeleteAgent":{ "name":"DeleteAgent", "http":{ @@ -235,6 +338,66 @@ "documentation":"

Deletes a data source from a knowledge base.

", "idempotent":true }, + "DeleteFlow":{ + "name":"DeleteFlow", + "http":{ + "method":"DELETE", + "requestUri":"/flows/{flowIdentifier}/", + "responseCode":200 + }, + "input":{"shape":"DeleteFlowRequest"}, + "output":{"shape":"DeleteFlowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a flow.

", + "idempotent":true + }, + "DeleteFlowAlias":{ + "name":"DeleteFlowAlias", + "http":{ + "method":"DELETE", + "requestUri":"/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteFlowAliasRequest"}, + "output":{"shape":"DeleteFlowAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes an alias of a flow.

", + "idempotent":true + }, + "DeleteFlowVersion":{ + "name":"DeleteFlowVersion", + "http":{ + "method":"DELETE", + "requestUri":"/flows/{flowIdentifier}/versions/{flowVersion}/", + "responseCode":200 + }, + "input":{"shape":"DeleteFlowVersionRequest"}, + "output":{"shape":"DeleteFlowVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a version of a flow.

", + "idempotent":true + }, "DeleteKnowledgeBase":{ "name":"DeleteKnowledgeBase", "http":{ @@ -255,6 +418,26 @@ "documentation":"

Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the knowledge base from any agents that it is associated with by making a DisassociateAgentKnowledgeBase request.

", "idempotent":true }, + "DeletePrompt":{ + "name":"DeletePrompt", + "http":{ + "method":"DELETE", + "requestUri":"/prompts/{promptIdentifier}/", + "responseCode":200 + }, + "input":{"shape":"DeletePromptRequest"}, + "output":{"shape":"DeletePromptResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a prompt or a prompt version from the Prompt management tool. For more information, see Delete prompts from the Prompt management tool and Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User Guide.

", + "idempotent":true + }, "DisassociateAgentKnowledgeBase":{ "name":"DisassociateAgentKnowledgeBase", "http":{ @@ -383,6 +566,60 @@ ], "documentation":"

Gets information about a data source.

" }, + "GetFlow":{ + "name":"GetFlow", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/", + "responseCode":200 + }, + "input":{"shape":"GetFlowRequest"}, + "output":{"shape":"GetFlowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about a flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "GetFlowAlias":{ + "name":"GetFlowAlias", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetFlowAliasRequest"}, + "output":{"shape":"GetFlowAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "GetFlowVersion":{ + "name":"GetFlowVersion", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/versions/{flowVersion}/", + "responseCode":200 + }, + "input":{"shape":"GetFlowVersionRequest"}, + "output":{"shape":"GetFlowVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about a version of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, "GetIngestionJob":{ "name":"GetIngestionJob", "http":{ @@ -419,6 +656,24 @@ ], "documentation":"

Gets information about a knowledge base.

" }, + "GetPrompt":{ + "name":"GetPrompt", + "http":{ + "method":"GET", + "requestUri":"/prompts/{promptIdentifier}/", + "responseCode":200 + }, + "input":{"shape":"GetPromptRequest"}, + "output":{"shape":"GetPromptResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about a prompt or a version of it. For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide.

" + }, "ListAgentActionGroups":{ "name":"ListAgentActionGroups", "http":{ @@ -526,6 +781,59 @@ ], "documentation":"

Lists the data sources in a knowledge base and information about each one.

" }, + "ListFlowAliases":{ + "name":"ListFlowAliases", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/aliases", + "responseCode":200 + }, + "input":{"shape":"ListFlowAliasesRequest"}, + "output":{"shape":"ListFlowAliasesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of aliases for a flow.

" + }, + "ListFlowVersions":{ + "name":"ListFlowVersions", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/versions", + "responseCode":200 + }, + "input":{"shape":"ListFlowVersionsRequest"}, + "output":{"shape":"ListFlowVersionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of information about each version of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "ListFlows":{ + "name":"ListFlows", + "http":{ + "method":"GET", + "requestUri":"/flows/", + "responseCode":200 + }, + "input":{"shape":"ListFlowsRequest"}, + "output":{"shape":"ListFlowsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of flows and information about each flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, "ListIngestionJobs":{ "name":"ListIngestionJobs", "http":{ @@ -561,6 +869,24 @@ ], "documentation":"

Lists the knowledge bases in an account and information about each of them.

" }, + "ListPrompts":{ + "name":"ListPrompts", + "http":{ + "method":"GET", + "requestUri":"/prompts/", + "responseCode":200 + }, + "input":{"shape":"ListPromptsRequest"}, + "output":{"shape":"ListPromptsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of prompts from the Prompt management tool and information about each prompt. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -599,6 +925,26 @@ ], "documentation":"

Creates a DRAFT version of the agent that can be used for internal testing.

" }, + "PrepareFlow":{ + "name":"PrepareFlow", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/", + "responseCode":202 + }, + "input":{"shape":"PrepareFlowRequest"}, + "output":{"shape":"PrepareFlowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Prepares the DRAFT version of a flow so that it can be invoked. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, "StartIngestionJob":{ "name":"StartIngestionJob", "http":{ @@ -758,40 +1104,103 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Updates configurations for a data source.

You can't change the chunkingConfiguration after you create the data source. Specify the existing chunkingConfiguration.

", + "documentation":"

Updates the configurations for a data source connector.

You can't change the chunkingConfiguration after you create the data source connector. Specify the existing chunkingConfiguration.

", "idempotent":true }, - "UpdateKnowledgeBase":{ - "name":"UpdateKnowledgeBase", + "UpdateFlow":{ + "name":"UpdateFlow", "http":{ "method":"PUT", - "requestUri":"/knowledgebases/{knowledgeBaseId}", - "responseCode":202 + "requestUri":"/flows/{flowIdentifier}/", + "responseCode":200 }, - "input":{"shape":"UpdateKnowledgeBaseRequest"}, - "output":{"shape":"UpdateKnowledgeBaseResponse"}, + "input":{"shape":"UpdateFlowRequest"}, + "output":{"shape":"UpdateFlowResponse"}, "errors":[ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Updates the configuration of a knowledge base with the fields that you specify. Because all fields will be overwritten, you must include the same values for fields that you want to keep the same.

You can change the following fields:

  • name

  • description

  • roleArn

You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must specify the same configurations as when you created the knowledge base. You can send a GetKnowledgeBase request and copy the same configurations.

", + "documentation":"

Modifies a flow. Include both fields that you want to keep and fields that you want to change. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", "idempotent":true - } - }, - "shapes":{ - "APISchema":{ - "type":"structure", - "members":{ - "payload":{ - "shape":"Payload", - "documentation":"

The JSON or YAML-formatted payload defining the OpenAPI schema for the action group. For more information, see Action group OpenAPI schemas.

" - }, - "s3":{ - "shape":"S3Identifier", + }, + "UpdateFlowAlias":{ + "name":"UpdateFlowAlias", + "http":{ + "method":"PUT", + "requestUri":"/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateFlowAliasRequest"}, + "output":{"shape":"UpdateFlowAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Modifies the alias of a flow. Include both fields that you want to keep and ones that you want to change. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "idempotent":true + }, + "UpdateKnowledgeBase":{ + "name":"UpdateKnowledgeBase", + "http":{ + "method":"PUT", + "requestUri":"/knowledgebases/{knowledgeBaseId}", + "responseCode":202 + }, + "input":{"shape":"UpdateKnowledgeBaseRequest"}, + "output":{"shape":"UpdateKnowledgeBaseResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the configuration of a knowledge base with the fields that you specify. Because all fields will be overwritten, you must include the same values for fields that you want to keep the same.

You can change the following fields:

  • name

  • description

  • roleArn

You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must specify the same configurations as when you created the knowledge base. You can send a GetKnowledgeBase request and copy the same configurations.

", + "idempotent":true + }, + "UpdatePrompt":{ + "name":"UpdatePrompt", + "http":{ + "method":"PUT", + "requestUri":"/prompts/{promptIdentifier}/", + "responseCode":200 + }, + "input":{"shape":"UpdatePromptRequest"}, + "output":{"shape":"UpdatePromptResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Modifies a prompt in your prompt library. Include both fields that you want to keep and fields that you want to replace. For more information, see Prompt management in Amazon Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide.

", + "idempotent":true + } + }, + "shapes":{ + "APISchema":{ + "type":"structure", + "members":{ + "payload":{ + "shape":"Payload", + "documentation":"

The JSON or YAML-formatted payload defining the OpenAPI schema for the action group. For more information, see Action group OpenAPI schemas.

" + }, + "s3":{ + "shape":"S3Identifier", "documentation":"

Contains details about the S3 object containing the OpenAPI schema for the action group. For more information, see Action group OpenAPI schemas.

" } }, @@ -827,7 +1236,10 @@ }, "ActionGroupSignature":{ "type":"string", - "enum":["AMAZON.UserInput"] + "enum":[ + "AMAZON.UserInput", + "AMAZON.CodeInterpreter" + ] }, "ActionGroupState":{ "type":"string", @@ -936,6 +1348,10 @@ "shape":"ModelIdentifier", "documentation":"

The foundation model used for orchestration by the agent.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

Details about the guardrail associated with the agent.

" + }, "idleSessionTTLInSeconds":{ "shape":"SessionTTL", "documentation":"

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" @@ -944,6 +1360,10 @@ "shape":"Instruction", "documentation":"

Instructions that tell the agent what it should do and how it should interact with users.

" }, + "memoryConfiguration":{ + "shape":"MemoryConfiguration", + "documentation":"

Contains memory configuration for the agent.

" + }, "preparedAt":{ "shape":"DateTimestamp", "documentation":"

The time at which the agent was last prepared.

" @@ -1213,6 +1633,17 @@ "min":0, "pattern":"^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:agent/[0-9a-zA-Z]{10}$" }, + "AgentFlowNodeConfiguration":{ + "type":"structure", + "required":["agentAliasArn"], + "members":{ + "agentAliasArn":{ + "shape":"AgentAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the alias of the agent to invoke.

" + } + }, + "documentation":"

Defines an agent node in your flow. You specify the agent to invoke at this point in the flow. For more information, see Node types in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, "AgentKnowledgeBase":{ "type":"structure", "required":[ @@ -1293,7 +1724,7 @@ "type":"string", "max":2048, "min":0, - "pattern":"^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?AmazonBedrockExecutionRoleForAgents_.+$" + "pattern":"^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$" }, "AgentStatus":{ "type":"string", @@ -1339,6 +1770,10 @@ "shape":"Description", "documentation":"

The description of the agent.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

Details about the guardrail associated with the agent.

" + }, "latestAgentVersion":{ "shape":"Version", "documentation":"

The latest version of the agent.

" @@ -1404,6 +1839,10 @@ "shape":"ModelIdentifier", "documentation":"

The foundation model that the version invokes.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

Details about the guardrail associated with the agent.

" + }, "idleSessionTTLInSeconds":{ "shape":"SessionTTL", "documentation":"

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" @@ -1412,6 +1851,10 @@ "shape":"Instruction", "documentation":"

The instructions provided to the agent.

" }, + "memoryConfiguration":{ + "shape":"MemoryConfiguration", + "documentation":"

Contains details of the memory configuration on the version of the agent.

" + }, "promptOverrideConfiguration":{ "shape":"PromptOverrideConfiguration", "documentation":"

Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see Advanced prompts.

" @@ -1467,6 +1910,10 @@ "shape":"Description", "documentation":"

The description of the version of the agent.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

Details about the guardrail associated with the agent.

" + }, "updatedAt":{ "shape":"DateTimestamp", "documentation":"

The time at which the version was last updated.

" @@ -1522,13 +1969,45 @@ "BasePromptTemplate":{ "type":"string", "max":100000, - "min":1 + "min":1, + "sensitive":true }, "BedrockEmbeddingModelArn":{ "type":"string", - "max":1011, + "max":2048, "min":20, - "pattern":"^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + }, + "BedrockEmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "dimensions":{ + "shape":"Dimensions", + "documentation":"

The dimensions details for the vector configuration used on the Bedrock embeddings model.

" + } + }, + "documentation":"

The vector configuration details for the Bedrock embeddings model.

" + }, + "BedrockFoundationModelConfiguration":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "modelArn":{ + "shape":"BedrockModelArn", + "documentation":"

The model's ARN.

" + }, + "parsingPrompt":{ + "shape":"ParsingPrompt", + "documentation":"

Instructions for interpreting the contents of a document.

" + } + }, + "documentation":"

Settings for a foundation model used to parse documents for a data source.

" + }, + "BedrockModelArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})$" }, "Boolean":{ "type":"boolean", @@ -1546,11 +2025,19 @@ "members":{ "chunkingStrategy":{ "shape":"ChunkingStrategy", - "documentation":"

Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.

  • FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration.

  • NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.

" + "documentation":"

Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.

  • FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration.

  • HIERARCHICAL – Split documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.

  • SEMANTIC – Split documents into chunks based on groups of similar content derived with natural language processing.

  • NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.

" }, "fixedSizeChunkingConfiguration":{ "shape":"FixedSizeChunkingConfiguration", "documentation":"

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.

" + }, + "hierarchicalChunkingConfiguration":{ + "shape":"HierarchicalChunkingConfiguration", + "documentation":"

Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.

" + }, + "semanticChunkingConfiguration":{ + "shape":"SemanticChunkingConfiguration", + "documentation":"

Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing.

" } }, "documentation":"

Details about how to chunk the documents in the data source. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.

" @@ -1559,14 +2046,22 @@ "type":"string", "enum":[ "FIXED_SIZE", - "NONE" + "NONE", + "HIERARCHICAL", + "SEMANTIC" ] }, "ClientToken":{ "type":"string", "max":256, "min":33, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$" + }, + "CollectorFlowNodeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

Defines a collector node in your flow. This node takes an iteration of inputs and consolidates them into an array in the output. For more information, see Node types in Amazon Bedrock in the Amazon Bedrock User Guide.

" }, "ColumnName":{ "type":"string", @@ -1574,6 +2069,17 @@ "min":0, "pattern":"^[a-zA-Z0-9_\\-]+$" }, + "ConditionFlowNodeConfiguration":{ + "type":"structure", + "required":["conditions"], + "members":{ + "conditions":{ + "shape":"FlowConditions", + "documentation":"

An array of conditions. Each member contains the name of a condition and an expression that defines the condition.

" + } + }, + "documentation":"

Defines a condition node in your flow. You can specify conditions that determine which node comes next in the flow. For more information, see Node types in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, "ConflictException":{ "type":"structure", "members":{ @@ -1586,6 +2092,89 @@ }, "exception":true }, + "ConfluenceAuthType":{ + "type":"string", + "enum":[ + "BASIC", + "OAUTH2_CLIENT_CREDENTIALS" + ] + }, + "ConfluenceCrawlerConfiguration":{ + "type":"structure", + "members":{ + "filterConfiguration":{ + "shape":"CrawlFilterConfiguration", + "documentation":"

The configuration of filtering the Confluence content. For example, configuring regular expression patterns to include or exclude certain content.

" + } + }, + "documentation":"

The configuration of the Confluence content. For example, configuring specific types of Confluence content.

" + }, + "ConfluenceDataSourceConfiguration":{ + "type":"structure", + "required":["sourceConfiguration"], + "members":{ + "crawlerConfiguration":{ + "shape":"ConfluenceCrawlerConfiguration", + "documentation":"

The configuration of the Confluence content. For example, configuring specific types of Confluence content.

" + }, + "sourceConfiguration":{ + "shape":"ConfluenceSourceConfiguration", + "documentation":"

The endpoint information to connect to your Confluence data source.

" + } + }, + "documentation":"

The configuration information to connect to Confluence as your data source.

" + }, + "ConfluenceHostType":{ + "type":"string", + "enum":["SAAS"] + }, + "ConfluenceSourceConfiguration":{ + "type":"structure", + "required":[ + "authType", + "credentialsSecretArn", + "hostType", + "hostUrl" + ], + "members":{ + "authType":{ + "shape":"ConfluenceAuthType", + "documentation":"

The supported authentication type to authenticate and connect to your Confluence instance.

" + }, + "credentialsSecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name of an Secrets Manager secret that stores your authentication credentials for your Confluence instance. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see Confluence connection configuration.

" + }, + "hostType":{ + "shape":"ConfluenceHostType", + "documentation":"

The supported host type, whether online/cloud or server/on-premises.

" + }, + "hostUrl":{ + "shape":"HttpsUrl", + "documentation":"

The Confluence host URL or instance URL.

" + } + }, + "documentation":"

The endpoint information to connect to your Confluence data source.

" + }, + "CrawlFilterConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "patternObjectFilter":{ + "shape":"PatternObjectFilterConfiguration", + "documentation":"

The configuration of filtering certain objects or content types of the data source.

" + }, + "type":{ + "shape":"CrawlFilterConfigurationType", + "documentation":"

The type of filtering that you want to apply to certain objects or content of the data source. For example, the PATTERN type is regular expression patterns you can apply to filter your content.

" + } + }, + "documentation":"

The configuration of filtering the data source content. For example, configuring regular expression patterns to include or exclude certain content.

" + }, + "CrawlFilterConfigurationType":{ + "type":"string", + "enum":["PATTERN"] + }, "CreateAgentActionGroupRequest":{ "type":"structure", "required":[ @@ -1637,7 +2226,7 @@ }, "parentActionGroupSignature":{ "shape":"ActionGroupSignature", - "documentation":"

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" + "documentation":"

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" } } }, @@ -1726,6 +2315,10 @@ "shape":"ModelIdentifier", "documentation":"

The foundation model to be used for orchestration by the agent you create.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

The unique Guardrail configuration assigned to the agent when it is created.

" + }, "idleSessionTTLInSeconds":{ "shape":"SessionTTL", "documentation":"

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" @@ -1734,6 +2327,10 @@ "shape":"Instruction", "documentation":"

Instructions that tell the agent what it should do and how it should interact with users.

" }, + "memoryConfiguration":{ + "shape":"MemoryConfiguration", + "documentation":"

Contains the details of the memory configured for the agent.

" + }, "promptOverrideConfiguration":{ "shape":"PromptOverrideConfiguration", "documentation":"

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" @@ -1769,11 +2366,11 @@ }, "dataDeletionPolicy":{ "shape":"DataDeletionPolicy", - "documentation":"

The data deletion policy assigned to the data source.

" + "documentation":"

The data deletion policy for the data source.

You can set the data deletion policy to:

  • DELETE: Deletes all underlying data belonging to the data source from the vector store upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted, only the underlying data. This flag is ignored if an Amazon Web Services account is deleted.

  • RETAIN: Retains all underlying data in your vector store upon deletion of a knowledge base or data source resource.

" }, "dataSourceConfiguration":{ "shape":"DataSourceConfiguration", - "documentation":"

Contains metadata about where the data source is stored.

" + "documentation":"

The connection configuration for the data source.

" }, "description":{ "shape":"Description", @@ -1809,13 +2406,12 @@ } } }, - "CreateKnowledgeBaseRequest":{ + "CreateFlowAliasRequest":{ "type":"structure", "required":[ - "knowledgeBaseConfiguration", + "flowIdentifier", "name", - "roleArn", - "storageConfiguration" + "routingConfiguration" ], "members":{ "clientToken":{ @@ -1825,465 +2421,1075 @@ }, "description":{ "shape":"Description", - "documentation":"

A description of the knowledge base.

" + "documentation":"

A description for the alias.

" }, - "knowledgeBaseConfiguration":{ - "shape":"KnowledgeBaseConfiguration", - "documentation":"

Contains details about the embeddings model used for the knowledge base.

" + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow for which to create an alias.

", + "location":"uri", + "locationName":"flowIdentifier" }, "name":{ "shape":"Name", - "documentation":"

A name for the knowledge base.

" - }, - "roleArn":{ - "shape":"KnowledgeBaseRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

" + "documentation":"

A name for the alias.

" }, - "storageConfiguration":{ - "shape":"StorageConfiguration", - "documentation":"

Contains details about the configuration of the vector database used for the knowledge base.

" + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

Contains information about the version to which to map the alias.

" }, "tags":{ "shape":"TagsMap", - "documentation":"

Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object.

" - } - } - }, - "CreateKnowledgeBaseResponse":{ - "type":"structure", - "required":["knowledgeBase"], - "members":{ - "knowledgeBase":{ - "shape":"KnowledgeBase", - "documentation":"

Contains details about the knowledge base.

" + "documentation":"

Any tags that you want to attach to the alias of the flow. For more information, see Tagging resources in Amazon Bedrock.

" } } }, - "CreationMode":{ - "type":"string", - "enum":[ - "DEFAULT", - "OVERRIDDEN" - ] - }, - "CustomControlMethod":{ - "type":"string", - "enum":["RETURN_CONTROL"] - }, - "DataDeletionPolicy":{ - "type":"string", - "enum":[ - "RETAIN", - "DELETE" - ] - }, - "DataSource":{ + "CreateFlowAliasResponse":{ "type":"structure", "required":[ + "arn", "createdAt", - "dataSourceConfiguration", - "dataSourceId", - "knowledgeBaseId", + "flowId", + "id", "name", - "status", + "routingConfiguration", "updatedAt" ], "members":{ + "arn":{ + "shape":"FlowAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the alias.

" + }, "createdAt":{ "shape":"DateTimestamp", - "documentation":"

The time at which the data source was created.

" - }, - "dataDeletionPolicy":{ - "shape":"DataDeletionPolicy", - "documentation":"

The data deletion policy for a data source.

" - }, - "dataSourceConfiguration":{ - "shape":"DataSourceConfiguration", - "documentation":"

Contains details about how the data source is stored.

" - }, - "dataSourceId":{ - "shape":"Id", - "documentation":"

The unique identifier of the data source.

" + "documentation":"

The time at which the alias was created.

" }, "description":{ "shape":"Description", - "documentation":"

The description of the data source.

" + "documentation":"

The description of the alias.

" }, - "failureReasons":{ - "shape":"FailureReasons", - "documentation":"

The detailed reasons on the failure to delete a data source.

" + "flowId":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow that the alias belongs to.

" }, - "knowledgeBaseId":{ - "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to which the data source belongs.

" + "id":{ + "shape":"FlowAliasId", + "documentation":"

The unique identifier of the alias.

" }, "name":{ "shape":"Name", - "documentation":"

The name of the data source.

" - }, - "serverSideEncryptionConfiguration":{ - "shape":"ServerSideEncryptionConfiguration", - "documentation":"

Contains details about the configuration of the server-side encryption.

" + "documentation":"

The name of the alias.

" }, - "status":{ - "shape":"DataSourceStatus", - "documentation":"

The status of the data source. The following statuses are possible:

  • Available – The data source has been created and is ready for ingestion into the knowledge base.

  • Deleting – The data source is being deleted.

" + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

Contains information about the version that the alias is mapped to.

" }, "updatedAt":{ "shape":"DateTimestamp", - "documentation":"

The time at which the data source was last updated.

" - }, - "vectorIngestionConfiguration":{ - "shape":"VectorIngestionConfiguration", - "documentation":"

Contains details about how to ingest the documents in the data source.

" + "documentation":"

The time at which the alias of the flow was last updated.

" } - }, - "documentation":"

Contains details about a data source.

" + } }, - "DataSourceConfiguration":{ + "CreateFlowRequest":{ "type":"structure", - "required":["type"], + "required":[ + "executionRoleArn", + "name" + ], "members":{ - "s3Configuration":{ - "shape":"S3DataSourceConfiguration", - "documentation":"

Contains details about the configuration of the S3 object containing the data source.

" + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true }, - "type":{ - "shape":"DataSourceType", - "documentation":"

The type of storage for the data source.

" + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the flow.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

A definition of the nodes and connections between nodes in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

A description for the flow.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

A name for the flow.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Any tags that you want to attach to the flow. For more information, see Tagging resources in Amazon Bedrock.

" } - }, - "documentation":"

Contains details about how a data source is stored.

" - }, - "DataSourceStatus":{ - "type":"string", - "enum":[ - "AVAILABLE", - "DELETING", - "DELETE_UNSUCCESSFUL" - ] - }, - "DataSourceSummaries":{ - "type":"list", - "member":{"shape":"DataSourceSummary"} + } }, - "DataSourceSummary":{ + "CreateFlowResponse":{ "type":"structure", "required":[ - "dataSourceId", - "knowledgeBaseId", + "arn", + "createdAt", + "executionRoleArn", + "id", "name", "status", - "updatedAt" + "updatedAt", + "version" ], "members":{ - "dataSourceId":{ - "shape":"Id", - "documentation":"

The unique identifier of the data source.

" + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that you encrypted the flow with.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

A definition of the nodes and connections between nodes in the flow.

" }, "description":{ - "shape":"Description", - "documentation":"

The description of the data source.

" + "shape":"FlowDescription", + "documentation":"

The description of the flow.

" }, - "knowledgeBaseId":{ - "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to which the data source belongs.

" + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" }, "name":{ - "shape":"Name", - "documentation":"

The name of the data source.

" + "shape":"FlowName", + "documentation":"

The name of the flow.

" }, "status":{ - "shape":"DataSourceStatus", - "documentation":"

The status of the data source.

" + "shape":"FlowStatus", + "documentation":"

The status of the flow. When you submit this request, the status will be NotPrepared. If creation fails, the status becomes Failed.

" }, "updatedAt":{ "shape":"DateTimestamp", - "documentation":"

The time at which the data source was last updated.

" + "documentation":"

The time at which the flow was last updated.

" + }, + "version":{ + "shape":"DraftVersion", + "documentation":"

The version of the flow. When you create a flow, the version created is the DRAFT version.

" } - }, - "documentation":"

Contains details about a data source.

" - }, - "DataSourceType":{ - "type":"string", - "enum":["S3"] - }, - "DateTimestamp":{ - "type":"timestamp", - "timestampFormat":"iso8601" + } }, - "DeleteAgentActionGroupRequest":{ + "CreateFlowVersionRequest":{ "type":"structure", - "required":[ - "actionGroupId", - "agentId", - "agentVersion" - ], + "required":["flowIdentifier"], "members":{ - "actionGroupId":{ - "shape":"Id", - "documentation":"

The unique identifier of the action group to delete.

", - "location":"uri", - "locationName":"actionGroupId" + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true }, - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that the action group belongs to.

", - "location":"uri", - "locationName":"agentId" + "description":{ + "shape":"FlowDescription", + "documentation":"

A description of the version of the flow.

" }, - "agentVersion":{ - "shape":"DraftVersion", - "documentation":"

The version of the agent that the action group belongs to.

", + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow that you want to create a version of.

", "location":"uri", - "locationName":"agentVersion" - }, - "skipResourceInUseCheck":{ - "shape":"Boolean", - "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "location":"querystring", - "locationName":"skipResourceInUseCheck" + "locationName":"flowIdentifier" } } }, - "DeleteAgentActionGroupResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteAgentAliasRequest":{ + "CreateFlowVersionResponse":{ "type":"structure", "required":[ - "agentAliasId", - "agentId" + "arn", + "createdAt", + "executionRoleArn", + "id", + "name", + "status", + "version" ], "members":{ - "agentAliasId":{ - "shape":"AgentAliasId", - "documentation":"

The unique identifier of the alias to delete.

", - "location":"uri", - "locationName":"agentAliasId" + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" }, - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that the alias belongs to.

", - "location":"uri", - "locationName":"agentId" + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The KMS key that the flow is encrypted with.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

A definition of the nodes and connections in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

The description of the flow version.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

The name of the flow version.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow.

" + }, + "version":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow that was created. Versions are numbered incrementally, starting from 1.

" } } }, - "DeleteAgentAliasResponse":{ + "CreateKnowledgeBaseRequest":{ "type":"structure", "required":[ - "agentAliasId", - "agentAliasStatus", - "agentId" + "knowledgeBaseConfiguration", + "name", + "roleArn", + "storageConfiguration" ], "members":{ - "agentAliasId":{ - "shape":"AgentAliasId", - "documentation":"

The unique identifier of the alias that was deleted.

" + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true }, - "agentAliasStatus":{ - "shape":"AgentAliasStatus", - "documentation":"

The status of the alias.

" + "description":{ + "shape":"Description", + "documentation":"

A description of the knowledge base.

" }, - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that the alias belongs to.

" - } - } - }, - "DeleteAgentRequest":{ - "type":"structure", - "required":["agentId"], - "members":{ - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent to delete.

", - "location":"uri", - "locationName":"agentId" + "knowledgeBaseConfiguration":{ + "shape":"KnowledgeBaseConfiguration", + "documentation":"

Contains details about the embeddings model used for the knowledge base.

" }, - "skipResourceInUseCheck":{ - "shape":"Boolean", - "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "location":"querystring", - "locationName":"skipResourceInUseCheck" + "name":{ + "shape":"Name", + "documentation":"

A name for the knowledge base.

" + }, + "roleArn":{ + "shape":"KnowledgeBaseRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

" + }, + "storageConfiguration":{ + "shape":"StorageConfiguration", + "documentation":"

Contains details about the configuration of the vector database used for the knowledge base.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object.

" } } }, - "DeleteAgentResponse":{ + "CreateKnowledgeBaseResponse":{ "type":"structure", - "required":[ - "agentId", - "agentStatus" - ], + "required":["knowledgeBase"], "members":{ - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that was deleted.

" - }, - "agentStatus":{ - "shape":"AgentStatus", - "documentation":"

The status of the agent.

" + "knowledgeBase":{ + "shape":"KnowledgeBase", + "documentation":"

Contains details about the knowledge base.

" } } }, - "DeleteAgentVersionRequest":{ + "CreatePromptRequest":{ "type":"structure", - "required":[ - "agentId", - "agentVersion" - ], + "required":["name"], "members":{ - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that the version belongs to.

", - "location":"uri", - "locationName":"agentId" + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true }, - "agentVersion":{ - "shape":"NumericalVersion", - "documentation":"

The version of the agent to delete.

", - "location":"uri", - "locationName":"agentVersion" + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" }, - "skipResourceInUseCheck":{ - "shape":"Boolean", - "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "location":"querystring", - "locationName":"skipResourceInUseCheck" + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

A description for the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

A name for the prompt.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Any tags that you want to attach to the prompt. For more information, see Tagging resources in Amazon Bedrock.

" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" } } }, - "DeleteAgentVersionResponse":{ + "CreatePromptResponse":{ "type":"structure", "required":[ - "agentId", - "agentStatus", - "agentVersion" + "arn", + "createdAt", + "id", + "name", + "updatedAt", + "version" ], "members":{ - "agentId":{ - "shape":"Id", - "documentation":"

The unique identifier of the agent that the version belongs to.

" + "arn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the prompt.

" }, - "agentStatus":{ - "shape":"AgentStatus", - "documentation":"

The status of the agent version.

" + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was created.

" }, - "agentVersion":{ - "shape":"NumericalVersion", - "documentation":"

The version that was deleted.

" + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that you encrypted the prompt with.

" + }, + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for your prompt.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

The description of the prompt.

" + }, + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

The name of the prompt.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was last updated.

" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the prompt. When you create a prompt, the version created is the DRAFT version.

" } } }, - "DeleteDataSourceRequest":{ + "CreatePromptVersionRequest":{ + "type":"structure", + "required":["promptIdentifier"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

A description for the version of the prompt.

" + }, + "promptIdentifier":{ + "shape":"PromptIdentifier", + "documentation":"

The unique identifier of the prompt that you want to create a version of.

", + "location":"uri", + "locationName":"promptIdentifier" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Any tags that you want to attach to the version of the prompt. For more information, see Tagging resources in Amazon Bedrock.

" + } + } + }, + "CreatePromptVersionResponse":{ "type":"structure", "required":[ - "dataSourceId", - "knowledgeBaseId" + "arn", + "createdAt", + "id", + "name", + "updatedAt", + "version" ], "members":{ - "dataSourceId":{ - "shape":"Id", - "documentation":"

The unique identifier of the data source to delete.

", - "location":"uri", - "locationName":"dataSourceId" + "arn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the version of the prompt.

" }, - "knowledgeBaseId":{ - "shape":"Id", - "documentation":"

The unique identifier of the knowledge base from which to delete the data source.

", - "location":"uri", - "locationName":"knowledgeBaseId" + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the version of the prompt.

" + }, + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

A description for the prompt version.

" + }, + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

The name of the prompt version.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was last updated.

" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the prompt that was created. Versions are numbered incrementally, starting from 1.

" } } }, - "DeleteDataSourceResponse":{ + "CreationMode":{ + "type":"string", + "enum":[ + "DEFAULT", + "OVERRIDDEN" + ] + }, + "CustomControlMethod":{ + "type":"string", + "enum":["RETURN_CONTROL"] + }, + "CustomTransformationConfiguration":{ + "type":"structure", + "required":[ + "intermediateStorage", + "transformations" + ], + "members":{ + "intermediateStorage":{ + "shape":"IntermediateStorage", + "documentation":"

An S3 bucket path for input and output objects.

" + }, + "transformations":{ + "shape":"Transformations", + "documentation":"

A Lambda function that processes documents.

" + } + }, + "documentation":"

Settings for customizing steps in the data source content ingestion pipeline.

You can configure the data source to process documents with a Lambda function after they are parsed and converted into chunks. When you add a post-chunking transformation, the service stores chunked documents in an S3 bucket and invokes a Lambda function to process them.

To process chunked documents with a Lambda function, define an S3 bucket path for input and output objects, and a transformation that specifies the Lambda function to invoke. You can use the Lambda function to customize how chunks are split, and the metadata for each chunk.

" + }, + "DataDeletionPolicy":{ + "type":"string", + "enum":[ + "RETAIN", + "DELETE" + ] + }, + "DataSource":{ "type":"structure", "required":[ + "createdAt", + "dataSourceConfiguration", "dataSourceId", "knowledgeBaseId", - "status" + "name", + "status", + "updatedAt" ], "members":{ + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the data source was created.

" + }, + "dataDeletionPolicy":{ + "shape":"DataDeletionPolicy", + "documentation":"

The data deletion policy for the data source.

" + }, + "dataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

The connection configuration for the data source.

" + }, "dataSourceId":{ "shape":"Id", - "documentation":"

The unique identifier of the data source that was deleted.

" + "documentation":"

The unique identifier of the data source.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the data source.

" + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

The detailed reasons on the failure to delete a data source.

" }, "knowledgeBaseId":{ "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to which the data source that was deleted belonged.

" + "documentation":"

The unique identifier of the knowledge base to which the data source belongs.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the data source.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

Contains details about the configuration of the server-side encryption.

" }, "status":{ "shape":"DataSourceStatus", - "documentation":"

The status of the data source.

" + "documentation":"

The status of the data source. The following statuses are possible:

  • Available – The data source has been created and is ready for ingestion into the knowledge base.

  • Deleting – The data source is being deleted.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the data source was last updated.

" + }, + "vectorIngestionConfiguration":{ + "shape":"VectorIngestionConfiguration", + "documentation":"

Contains details about how to ingest the documents in the data source.

" } - } + }, + "documentation":"

Contains details about a data source.

" }, - "DeleteKnowledgeBaseRequest":{ + "DataSourceConfiguration":{ "type":"structure", - "required":["knowledgeBaseId"], + "required":["type"], "members":{ - "knowledgeBaseId":{ - "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to delete.

", - "location":"uri", - "locationName":"knowledgeBaseId" + "confluenceConfiguration":{ + "shape":"ConfluenceDataSourceConfiguration", + "documentation":"

The configuration information to connect to Confluence as your data source.

Confluence data source connector is in preview release and is subject to change.

" + }, + "s3Configuration":{ + "shape":"S3DataSourceConfiguration", + "documentation":"

The configuration information to connect to Amazon S3 as your data source.

" + }, + "salesforceConfiguration":{ + "shape":"SalesforceDataSourceConfiguration", + "documentation":"

The configuration information to connect to Salesforce as your data source.

Salesforce data source connector is in preview release and is subject to change.

" + }, + "sharePointConfiguration":{ + "shape":"SharePointDataSourceConfiguration", + "documentation":"

The configuration information to connect to SharePoint as your data source.

SharePoint data source connector is in preview release and is subject to change.

" + }, + "type":{ + "shape":"DataSourceType", + "documentation":"

The type of data source.

" + }, + "webConfiguration":{ + "shape":"WebDataSourceConfiguration", + "documentation":"

The configuration of web URLs to crawl for your data source. You should be authorized to crawl the URLs.

Crawling web URLs as your data source is in preview release and is subject to change.

" } - } + }, + "documentation":"

The connection configuration for the data source.

" }, - "DeleteKnowledgeBaseResponse":{ + "DataSourceStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "DELETING", + "DELETE_UNSUCCESSFUL" + ] + }, + "DataSourceSummaries":{ + "type":"list", + "member":{"shape":"DataSourceSummary"} + }, + "DataSourceSummary":{ "type":"structure", "required":[ + "dataSourceId", "knowledgeBaseId", - "status" + "name", + "status", + "updatedAt" ], "members":{ + "dataSourceId":{ + "shape":"Id", + "documentation":"

The unique identifier of the data source.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the data source.

" + }, "knowledgeBaseId":{ "shape":"Id", - "documentation":"

The unique identifier of the knowledge base that was deleted.

" + "documentation":"

The unique identifier of the knowledge base to which the data source belongs.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the data source.

" }, "status":{ - "shape":"KnowledgeBaseStatus", - "documentation":"

The status of the knowledge base and whether it has been successfully deleted.

" + "shape":"DataSourceStatus", + "documentation":"

The status of the data source.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the data source was last updated.

" } - } + }, + "documentation":"

Contains details about a data source.

" }, - "Description":{ + "DataSourceType":{ "type":"string", - "max":200, - "min":1 + "enum":[ + "S3", + "WEB", + "CONFLUENCE", + "SALESFORCE", + "SHAREPOINT" + ] }, - "DisassociateAgentKnowledgeBaseRequest":{ + "DateTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "DeleteAgentActionGroupRequest":{ "type":"structure", "required":[ + "actionGroupId", "agentId", - "agentVersion", - "knowledgeBaseId" + "agentVersion" ], "members":{ + "actionGroupId":{ + "shape":"Id", + "documentation":"

The unique identifier of the action group to delete.

", + "location":"uri", + "locationName":"actionGroupId" + }, "agentId":{ "shape":"Id", - "documentation":"

The unique identifier of the agent from which to disassociate the knowledge base.

", + "documentation":"

The unique identifier of the agent that the action group belongs to.

", "location":"uri", "locationName":"agentId" }, "agentVersion":{ "shape":"DraftVersion", - "documentation":"

The version of the agent from which to disassociate the knowledge base.

", + "documentation":"

The version of the agent that the action group belongs to.

", "location":"uri", "locationName":"agentVersion" }, - "knowledgeBaseId":{ - "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to disassociate.

", - "location":"uri", - "locationName":"knowledgeBaseId" + "skipResourceInUseCheck":{ + "shape":"Boolean", + "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "location":"querystring", + "locationName":"skipResourceInUseCheck" } } }, - "DisassociateAgentKnowledgeBaseResponse":{ + "DeleteAgentActionGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAgentAliasRequest":{ + "type":"structure", + "required":[ + "agentAliasId", + "agentId" + ], + "members":{ + "agentAliasId":{ + "shape":"AgentAliasId", + "documentation":"

The unique identifier of the alias to delete.

", + "location":"uri", + "locationName":"agentAliasId" + }, + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent that the alias belongs to.

", + "location":"uri", + "locationName":"agentId" + } + } + }, + "DeleteAgentAliasResponse":{ + "type":"structure", + "required":[ + "agentAliasId", + "agentAliasStatus", + "agentId" + ], + "members":{ + "agentAliasId":{ + "shape":"AgentAliasId", + "documentation":"

The unique identifier of the alias that was deleted.

" + }, + "agentAliasStatus":{ + "shape":"AgentAliasStatus", + "documentation":"

The status of the alias.

" + }, + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent that the alias belongs to.

" + } + } + }, + "DeleteAgentRequest":{ + "type":"structure", + "required":["agentId"], + "members":{ + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent to delete.

", + "location":"uri", + "locationName":"agentId" + }, + "skipResourceInUseCheck":{ + "shape":"Boolean", + "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "location":"querystring", + "locationName":"skipResourceInUseCheck" + } + } + }, + "DeleteAgentResponse":{ + "type":"structure", + "required":[ + "agentId", + "agentStatus" + ], + "members":{ + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent that was deleted.

" + }, + "agentStatus":{ + "shape":"AgentStatus", + "documentation":"

The status of the agent.

" + } + } + }, + "DeleteAgentVersionRequest":{ + "type":"structure", + "required":[ + "agentId", + "agentVersion" + ], + "members":{ + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent that the version belongs to.

", + "location":"uri", + "locationName":"agentId" + }, + "agentVersion":{ + "shape":"NumericalVersion", + "documentation":"

The version of the agent to delete.

", + "location":"uri", + "locationName":"agentVersion" + }, + "skipResourceInUseCheck":{ + "shape":"Boolean", + "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "location":"querystring", + "locationName":"skipResourceInUseCheck" + } + } + }, + "DeleteAgentVersionResponse":{ + "type":"structure", + "required":[ + "agentId", + "agentStatus", + "agentVersion" + ], + "members":{ + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent that the version belongs to.

" + }, + "agentStatus":{ + "shape":"AgentStatus", + "documentation":"

The status of the agent version.

" + }, + "agentVersion":{ + "shape":"NumericalVersion", + "documentation":"

The version that was deleted.

" + } + } + }, + "DeleteDataSourceRequest":{ + "type":"structure", + "required":[ + "dataSourceId", + "knowledgeBaseId" + ], + "members":{ + "dataSourceId":{ + "shape":"Id", + "documentation":"

The unique identifier of the data source to delete.

", + "location":"uri", + "locationName":"dataSourceId" + }, + "knowledgeBaseId":{ + "shape":"Id", + "documentation":"

The unique identifier of the knowledge base from which to delete the data source.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DeleteDataSourceResponse":{ + "type":"structure", + "required":[ + "dataSourceId", + "knowledgeBaseId", + "status" + ], + "members":{ + "dataSourceId":{ + "shape":"Id", + "documentation":"

The unique identifier of the data source that was deleted.

" + }, + "knowledgeBaseId":{ + "shape":"Id", + "documentation":"

The unique identifier of the knowledge base to which the data source that was deleted belonged.

" + }, + "status":{ + "shape":"DataSourceStatus", + "documentation":"

The status of the data source.

" + } + } + }, + "DeleteFlowAliasRequest":{ + "type":"structure", + "required":[ + "aliasIdentifier", + "flowIdentifier" + ], + "members":{ + "aliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

The unique identifier of the alias to be deleted.

", + "location":"uri", + "locationName":"aliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow that the alias belongs to.

", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "DeleteFlowAliasResponse":{ + "type":"structure", + "required":[ + "flowId", + "id" + ], + "members":{ + "flowId":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow that the alias belongs to.

" + }, + "id":{ + "shape":"FlowAliasId", + "documentation":"

The unique identifier of the flow.

" + } + } + }, + "DeleteFlowRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "skipResourceInUseCheck":{ + "shape":"Boolean", + "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "location":"querystring", + "locationName":"skipResourceInUseCheck" + } + } + }, + "DeleteFlowResponse":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + } + } + }, + "DeleteFlowVersionRequest":{ + "type":"structure", + "required":[ + "flowIdentifier", + "flowVersion" + ], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow whose version that you want to delete

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "flowVersion":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow that you want to delete.

", + "location":"uri", + "locationName":"flowVersion" + }, + "skipResourceInUseCheck":{ + "shape":"Boolean", + "documentation":"

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "location":"querystring", + "locationName":"skipResourceInUseCheck" + } + } + }, + "DeleteFlowVersionResponse":{ + "type":"structure", + "required":[ + "id", + "version" + ], + "members":{ + "id":{ + "shape":"Id", + "documentation":"

The unique identifier of the flow.

" + }, + "version":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow being deleted.

" + } + } + }, + "DeleteKnowledgeBaseRequest":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"Id", + "documentation":"

The unique identifier of the knowledge base to delete.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DeleteKnowledgeBaseResponse":{ + "type":"structure", + "required":[ + "knowledgeBaseId", + "status" + ], + "members":{ + "knowledgeBaseId":{ + "shape":"Id", + "documentation":"

The unique identifier of the knowledge base that was deleted.

" + }, + "status":{ + "shape":"KnowledgeBaseStatus", + "documentation":"

The status of the knowledge base and whether it has been successfully deleted.

" + } + } + }, + "DeletePromptRequest":{ + "type":"structure", + "required":["promptIdentifier"], + "members":{ + "promptIdentifier":{ + "shape":"PromptIdentifier", + "documentation":"

The unique identifier of the prompt.

", + "location":"uri", + "locationName":"promptIdentifier" + }, + "promptVersion":{ + "shape":"NumericalVersion", + "documentation":"

The version of the prompt to delete.

", + "location":"querystring", + "locationName":"promptVersion" + } + } + }, + "DeletePromptResponse":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt that was deleted.

" + }, + "version":{ + "shape":"NumericalVersion", + "documentation":"

The version of the prompt that was deleted.

" + } + } + }, + "Description":{ + "type":"string", + "max":200, + "min":1 + }, + "Dimensions":{ + "type":"integer", + "box":true, + "max":4096, + "min":0 + }, + "DisassociateAgentKnowledgeBaseRequest":{ + "type":"structure", + "required":[ + "agentId", + "agentVersion", + "knowledgeBaseId" + ], + "members":{ + "agentId":{ + "shape":"Id", + "documentation":"

The unique identifier of the agent from which to disassociate the knowledge base.

", + "location":"uri", + "locationName":"agentId" + }, + "agentVersion":{ + "shape":"DraftVersion", + "documentation":"

The version of the agent from which to disassociate the knowledge base.

", + "location":"uri", + "locationName":"agentVersion" + }, + "knowledgeBaseId":{ + "shape":"Id", + "documentation":"

The unique identifier of the knowledge base to disassociate.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DisassociateAgentKnowledgeBaseResponse":{ "type":"structure", "members":{ } @@ -2291,54 +3497,659 @@ "DraftVersion":{ "type":"string", "max":5, - "min":5, - "pattern":"^DRAFT$" + "min":5, + "pattern":"^DRAFT$" + }, + "EmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "bedrockEmbeddingModelConfiguration":{ + "shape":"BedrockEmbeddingModelConfiguration", + "documentation":"

The vector configuration details on the Bedrock embeddings model.

" + } + }, + "documentation":"

The configuration details for the embeddings model.

" + }, + "EnabledMemoryTypes":{ + "type":"list", + "member":{"shape":"MemoryType"}, + "max":1, + "min":1 + }, + "FailureReason":{ + "type":"string", + "max":2048, + "min":0 + }, + "FailureReasons":{ + "type":"list", + "member":{"shape":"FailureReason"}, + "max":2048, + "min":0 + }, + "FieldName":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^.*$" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"FilterPattern"}, + "max":25, + "min":1, + "sensitive":true + }, + "FilterPattern":{ + "type":"string", + "max":1000, + "min":1, + "sensitive":true + }, + "FilteredObjectType":{ + "type":"string", + "max":50, + "min":1, + "sensitive":true + }, + "FixedSizeChunkingConfiguration":{ + "type":"structure", + "required":[ + "maxTokens", + "overlapPercentage" + ], + "members":{ + "maxTokens":{ + "shape":"FixedSizeChunkingConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens to include in a chunk.

" + }, + "overlapPercentage":{ + "shape":"FixedSizeChunkingConfigurationOverlapPercentageInteger", + "documentation":"

The percentage of overlap between adjacent chunks of a data source.

" + } + }, + "documentation":"

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.

" + }, + "FixedSizeChunkingConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "FixedSizeChunkingConfigurationOverlapPercentageInteger":{ + "type":"integer", + "box":true, + "max":99, + "min":1 + }, + "FlowAliasArn":{ + "type":"string", + "pattern":"^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/(TSTALIASID|[0-9a-zA-Z]{10})$" + }, + "FlowAliasId":{ + "type":"string", + "pattern":"^(TSTALIASID|[0-9a-zA-Z]{10})$" + }, + "FlowAliasIdentifier":{ + "type":"string", + "pattern":"^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(TSTALIASID|[0-9a-zA-Z]{10})$" + }, + "FlowAliasRoutingConfiguration":{ + "type":"list", + "member":{"shape":"FlowAliasRoutingConfigurationListItem"}, + "max":1, + "min":1 + }, + "FlowAliasRoutingConfigurationListItem":{ + "type":"structure", + "members":{ + "flowVersion":{ + "shape":"Version", + "documentation":"

The version that the alias maps to.

" + } + }, + "documentation":"

Contains information about a version that the alias maps to.

" + }, + "FlowAliasSummaries":{ + "type":"list", + "member":{"shape":"FlowAliasSummary"}, + "max":10, + "min":0 + }, + "FlowAliasSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "flowId", + "id", + "name", + "routingConfiguration", + "updatedAt" + ], + "members":{ + "arn":{ + "shape":"FlowAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow alias.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the alias was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A description of the alias.

" + }, + "flowId":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "id":{ + "shape":"FlowAliasId", + "documentation":"

The unique identifier of the alias of the flow.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the alias.

" + }, + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

A list of configurations about the versions that the alias maps to. Currently, you can only specify one.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the alias was last updated.

" + } + }, + "documentation":"

Contains information about an alias of a flow.

This data type is used in the following API operations:

" + }, + "FlowArn":{ + "type":"string", + "pattern":"^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}$" + }, + "FlowCondition":{ + "type":"structure", + "required":["name"], + "members":{ + "expression":{ + "shape":"FlowConditionExpression", + "documentation":"

Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in Node types in prompt flows.

" + }, + "name":{ + "shape":"FlowConditionName", + "documentation":"

A name for the condition that you can reference.

" + } + }, + "documentation":"

Defines a condition in the condition node.

", + "sensitive":true + }, + "FlowConditionExpression":{ + "type":"string", + "max":64, + "min":1, + "sensitive":true + }, + "FlowConditionName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + }, + "FlowConditionalConnectionConfiguration":{ + "type":"structure", + "required":["condition"], + "members":{ + "condition":{ + "shape":"FlowConditionName", + "documentation":"

The condition that triggers this connection. For more information about how to write conditions, see the Condition node type in the Node types topic in the Amazon Bedrock User Guide.

" + } + }, + "documentation":"

The configuration of a connection between a condition node and another node.

" + }, + "FlowConditions":{ + "type":"list", + "member":{"shape":"FlowCondition"}, + "max":5, + "min":1, + "sensitive":true }, - "FailureReason":{ + "FlowConnection":{ + "type":"structure", + "required":[ + "name", + "source", + "target", + "type" + ], + "members":{ + "configuration":{ + "shape":"FlowConnectionConfiguration", + "documentation":"

The configuration of the connection.

" + }, + "name":{ + "shape":"FlowConnectionName", + "documentation":"

A name for the connection that you can reference.

" + }, + "source":{ + "shape":"FlowNodeName", + "documentation":"

The node that the connection starts at.

" + }, + "target":{ + "shape":"FlowNodeName", + "documentation":"

The node that the connection ends at.

" + }, + "type":{ + "shape":"FlowConnectionType", + "documentation":"

Whether the source node that the connection begins from is a condition node (Conditional) or not (Data).

" + } + }, + "documentation":"

Contains information about a connection between two nodes in the flow.

" + }, + "FlowConnectionConfiguration":{ + "type":"structure", + "members":{ + "conditional":{ + "shape":"FlowConditionalConnectionConfiguration", + "documentation":"

The configuration of a connection originating from a Condition node.

" + }, + "data":{ + "shape":"FlowDataConnectionConfiguration", + "documentation":"

The configuration of a connection originating from a node that isn't a Condition node.

" + } + }, + "documentation":"

The configuration of the connection.

", + "union":true + }, + "FlowConnectionName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){1,100}$" + }, + "FlowConnectionType":{ + "type":"string", + "enum":[ + "Data", + "Conditional" + ] + }, + "FlowConnections":{ + "type":"list", + "member":{"shape":"FlowConnection"}, + "max":20, + "min":0 + }, + "FlowDataConnectionConfiguration":{ + "type":"structure", + "required":[ + "sourceOutput", + "targetInput" + ], + "members":{ + "sourceOutput":{ + "shape":"FlowNodeOutputName", + "documentation":"

The name of the output in the source node that the connection begins from.

" + }, + "targetInput":{ + "shape":"FlowNodeInputName", + "documentation":"

The name of the input in the target node that the connection ends at.

" + } + }, + "documentation":"

The configuration of a connection originating from a node that isn't a Condition node.

" + }, + "FlowDefinition":{ + "type":"structure", + "members":{ + "connections":{ + "shape":"FlowConnections", + "documentation":"

An array of connection definitions in the flow.

" + }, + "nodes":{ + "shape":"FlowNodes", + "documentation":"

An array of node definitions in the flow.

" + } + }, + "documentation":"

The definition of the nodes and connections between nodes in the flow.

" + }, + "FlowDescription":{ + "type":"string", + "max":200, + "min":1 + }, + "FlowExecutionRoleArn":{ "type":"string", "max":2048, + "min":0, + "pattern":"^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?.+$" + }, + "FlowId":{ + "type":"string", + "pattern":"^[0-9a-zA-Z]{10}$" + }, + "FlowIdentifier":{ + "type":"string", + "pattern":"^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$" + }, + "FlowName":{ + "type":"string", + "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" + }, + "FlowNode":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "configuration":{ + "shape":"FlowNodeConfiguration", + "documentation":"

Contains configurations for the node.

" + }, + "inputs":{ + "shape":"FlowNodeInputs", + "documentation":"

An array of objects, each of which contains information about an input into the node.

" + }, + "name":{ + "shape":"FlowNodeName", + "documentation":"

A name for the node.

" + }, + "outputs":{ + "shape":"FlowNodeOutputs", + "documentation":"

A list of objects, each of which contains information about an output from the node.

" + }, + "type":{ + "shape":"FlowNodeType", + "documentation":"

The type of node. This value must match the name of the key that you provide in the configuration you provide in the FlowNodeConfiguration field.

" + } + }, + "documentation":"

Contains configurations about a node in the flow.

" + }, + "FlowNodeConfiguration":{ + "type":"structure", + "members":{ + "agent":{ + "shape":"AgentFlowNodeConfiguration", + "documentation":"

Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response.

" + }, + "collector":{ + "shape":"CollectorFlowNodeConfiguration", + "documentation":"

Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs.

" + }, + "condition":{ + "shape":"ConditionFlowNodeConfiguration", + "documentation":"

Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.

" + }, + "input":{ + "shape":"InputFlowNodeConfiguration", + "documentation":"

Contains configurations for an input flow node in your flow. The first node in the flow. inputs can't be specified for this node.

" + }, + "iterator":{ + "shape":"IteratorFlowNodeConfiguration", + "documentation":"

Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.

The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.

" + }, + "knowledgeBase":{ + "shape":"KnowledgeBaseFlowNodeConfiguration", + "documentation":"

Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.

" + }, + "lambdaFunction":{ + "shape":"LambdaFunctionFlowNodeConfiguration", + "documentation":"

Contains configurations for a Lambda function node in your flow. Invokes a Lambda function.

" + }, + "lex":{ + "shape":"LexFlowNodeConfiguration", + "documentation":"

Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.

" + }, + "output":{ + "shape":"OutputFlowNodeConfiguration", + "documentation":"

Contains configurations for an output flow node in your flow. The last node in the flow. outputs can't be specified for this node.

" + }, + "prompt":{ + "shape":"PromptFlowNodeConfiguration", + "documentation":"

Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node.

" + }, + "retrieval":{ + "shape":"RetrievalFlowNodeConfiguration", + "documentation":"

Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output.

" + }, + "storage":{ + "shape":"StorageFlowNodeConfiguration", + "documentation":"

Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location.

" + } + }, + "documentation":"

Contains configurations for a node in your flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

", + "union":true + }, + "FlowNodeIODataType":{ + "type":"string", + "enum":[ + "String", + "Number", + "Boolean", + "Object", + "Array" + ] + }, + "FlowNodeInput":{ + "type":"structure", + "required":[ + "expression", + "name", + "type" + ], + "members":{ + "expression":{ + "shape":"FlowNodeInputExpression", + "documentation":"

An expression that formats the input for the node. For an explanation of how to create expressions, see Expressions in Prompt flows in Amazon Bedrock.

" + }, + "name":{ + "shape":"FlowNodeInputName", + "documentation":"

A name for the input that you can reference.

" + }, + "type":{ + "shape":"FlowNodeIODataType", + "documentation":"

The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown.

" + } + }, + "documentation":"

Contains configurations for an input to a node.

" + }, + "FlowNodeInputExpression":{ + "type":"string", + "max":64, + "min":1, + "sensitive":true + }, + "FlowNodeInputName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + }, + "FlowNodeInputs":{ + "type":"list", + "member":{"shape":"FlowNodeInput"}, + "max":5, "min":0 }, - "FailureReasons":{ + "FlowNodeName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + }, + "FlowNodeOutput":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"FlowNodeOutputName", + "documentation":"

A name for the output that you can reference.

" + }, + "type":{ + "shape":"FlowNodeIODataType", + "documentation":"

The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown.

" + } + }, + "documentation":"

Contains configurations for an output from a node.

" + }, + "FlowNodeOutputName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + }, + "FlowNodeOutputs":{ "type":"list", - "member":{"shape":"FailureReason"}, - "max":2048, + "member":{"shape":"FlowNodeOutput"}, + "max":5, "min":0 }, - "FieldName":{ + "FlowNodeType":{ "type":"string", - "max":2048, + "enum":[ + "Input", + "Output", + "KnowledgeBase", + "Condition", + "Lex", + "Prompt", + "LambdaFunction", + "Storage", + "Agent", + "Retrieval", + "Iterator", + "Collector" + ] + }, + "FlowNodes":{ + "type":"list", + "member":{"shape":"FlowNode"}, + "max":20, "min":0, - "pattern":"^.*$" + "sensitive":true + }, + "FlowStatus":{ + "type":"string", + "enum":[ + "Failed", + "Prepared", + "Preparing", + "NotPrepared" + ] + }, + "FlowSummaries":{ + "type":"list", + "member":{"shape":"FlowSummary"}, + "max":10, + "min":0 + }, + "FlowSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "id", + "name", + "status", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

A description of the flow.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

The name of the flow.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow. The following statuses are possible:

  • NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version.

  • Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing.

  • Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing.

  • Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was last updated.

" + }, + "version":{ + "shape":"DraftVersion", + "documentation":"

The latest version of the flow.

" + } + }, + "documentation":"

Contains the definition of a flow.

" + }, + "FlowValidation":{ + "type":"structure", + "required":[ + "message", + "severity" + ], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

A message describing the validation error.

" + }, + "severity":{ + "shape":"FlowValidationSeverity", + "documentation":"

The severity of the issue described in the message.

" + } + }, + "documentation":"

Contains information about validation of the flow.

This data type is used in the following API operations:

" + }, + "FlowValidationSeverity":{ + "type":"string", + "enum":[ + "Warning", + "Error" + ] + }, + "FlowValidations":{ + "type":"list", + "member":{"shape":"FlowValidation"}, + "max":100, + "min":0 }, - "FixedSizeChunkingConfiguration":{ + "FlowVersionSummaries":{ + "type":"list", + "member":{"shape":"FlowVersionSummary"}, + "max":10, + "min":0 + }, + "FlowVersionSummary":{ "type":"structure", "required":[ - "maxTokens", - "overlapPercentage" + "arn", + "createdAt", + "id", + "status", + "version" ], "members":{ - "maxTokens":{ - "shape":"FixedSizeChunkingConfigurationMaxTokensInteger", - "documentation":"

The maximum number of tokens to include in a chunk.

" + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow that the version belongs to.

" }, - "overlapPercentage":{ - "shape":"FixedSizeChunkingConfigurationOverlapPercentageInteger", - "documentation":"

The percentage of overlap between adjacent chunks of a data source.

" + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow version was created.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow.

" + }, + "version":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow.

" } }, - "documentation":"

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.

" - }, - "FixedSizeChunkingConfigurationMaxTokensInteger":{ - "type":"integer", - "box":true, - "min":1 - }, - "FixedSizeChunkingConfigurationOverlapPercentageInteger":{ - "type":"integer", - "box":true, - "max":99, - "min":1 + "documentation":"

Contains information about the flow version.

This data type is used in the following API operations:

" }, "Function":{ "type":"structure", @@ -2570,6 +4381,223 @@ } } }, + "GetFlowAliasRequest":{ + "type":"structure", + "required":[ + "aliasIdentifier", + "flowIdentifier" + ], + "members":{ + "aliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

The unique identifier of the alias for which to retrieve information.

", + "location":"uri", + "locationName":"aliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow that the alias belongs to.

", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "GetFlowAliasResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "flowId", + "id", + "name", + "routingConfiguration", + "updatedAt" + ], + "members":{ + "arn":{ + "shape":"FlowAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the flow.

" + }, + "flowId":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow that the alias belongs to.

" + }, + "id":{ + "shape":"FlowAliasId", + "documentation":"

The unique identifier of the alias of the flow.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the flow alias.

" + }, + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

Contains information about the version that the alias is mapped to.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow alias was last updated.

" + } + } + }, + "GetFlowRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "GetFlowResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "executionRoleArn", + "id", + "name", + "status", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

The definition of the nodes and connections between the nodes in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

The description of the flow.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

The name of the flow.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow. The following statuses are possible:

  • NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version.

  • Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing.

  • Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing.

  • Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was last updated.

" + }, + "validations":{ + "shape":"FlowValidations", + "documentation":"

A list of validation error messages related to the last failed operation on the flow.

" + }, + "version":{ + "shape":"DraftVersion", + "documentation":"

The version of the flow for which information was retrieved.

" + } + } + }, + "GetFlowVersionRequest":{ + "type":"structure", + "required":[ + "flowIdentifier", + "flowVersion" + ], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow for which to get information.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "flowVersion":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow for which to get information.

", + "location":"uri", + "locationName":"flowVersion" + } + } + }, + "GetFlowVersionResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "executionRoleArn", + "id", + "name", + "status", + "version" + ], + "members":{ + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that the version of the flow is encrypted with.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

The definition of the nodes and connections between nodes in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

The description of the flow.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

The name of the flow version.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow.

" + }, + "version":{ + "shape":"NumericalVersion", + "documentation":"

The version of the flow for which information was retrieved.

" + } + } + }, "GetIngestionJobRequest":{ "type":"structure", "required":[ @@ -2630,6 +4658,151 @@ } } }, + "GetPromptRequest":{ + "type":"structure", + "required":["promptIdentifier"], + "members":{ + "promptIdentifier":{ + "shape":"PromptIdentifier", + "documentation":"

The unique identifier of the prompt.

", + "location":"uri", + "locationName":"promptIdentifier" + }, + "promptVersion":{ + "shape":"Version", + "documentation":"

The version of the prompt about which you want to retrieve information.

", + "location":"querystring", + "locationName":"promptVersion" + } + } + }, + "GetPromptResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "id", + "name", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the prompt.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.

" + }, + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

The description of the prompt.

" + }, + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

The name of the prompt.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was last updated.

" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the prompt.

" + } + } + }, + "GuardrailConfiguration":{ + "type":"structure", + "members":{ + "guardrailIdentifier":{ + "shape":"GuardrailIdentifier", + "documentation":"

The unique identifier of the guardrail.

" + }, + "guardrailVersion":{ + "shape":"GuardrailVersion", + "documentation":"

The version of the guardrail.

" + } + }, + "documentation":"

Details about the guardrail associated with an agent.

" + }, + "GuardrailIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + }, + "GuardrailVersion":{ + "type":"string", + "pattern":"^(([0-9]{1,8})|(DRAFT))$" + }, + "HierarchicalChunkingConfiguration":{ + "type":"structure", + "required":[ + "levelConfigurations", + "overlapTokens" + ], + "members":{ + "levelConfigurations":{ + "shape":"HierarchicalChunkingLevelConfigurations", + "documentation":"

Token settings for each layer.

" + }, + "overlapTokens":{ + "shape":"HierarchicalChunkingConfigurationOverlapTokensInteger", + "documentation":"

The number of tokens to repeat across chunks in the same layer.

" + } + }, + "documentation":"

Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.

You configure the number of tokens to overlap, or repeat across adjacent chunks. For example, if you set overlap tokens to 60, the last 60 tokens in the first chunk are also included at the beginning of the second chunk. For each layer, you must also configure the maximum number of tokens in a chunk.

" + }, + "HierarchicalChunkingConfigurationOverlapTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "HierarchicalChunkingLevelConfiguration":{ + "type":"structure", + "required":["maxTokens"], + "members":{ + "maxTokens":{ + "shape":"HierarchicalChunkingLevelConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens that a chunk can contain in this layer.

" + } + }, + "documentation":"

Token settings for a layer in a hierarchical chunking configuration.

" + }, + "HierarchicalChunkingLevelConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "max":8192, + "min":1 + }, + "HierarchicalChunkingLevelConfigurations":{ + "type":"list", + "member":{"shape":"HierarchicalChunkingLevelConfiguration"}, + "max":2, + "min":2 + }, + "HttpsUrl":{ + "type":"string", + "pattern":"^https://[A-Za-z0-9][^\\s]*$" + }, "Id":{ "type":"string", "pattern":"^[0-9a-zA-Z]{10}$" @@ -2877,12 +5050,29 @@ }, "documentation":"

Contains details about an ingestion job.

" }, + "InputFlowNodeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains configurations for the input flow node for a flow. This node takes the input from flow invocation and passes it to the next node in the data type that you specify.

" + }, "Instruction":{ "type":"string", - "max":1200, + "max":4000, "min":40, "sensitive":true }, + "IntermediateStorage":{ + "type":"structure", + "required":["s3Location"], + "members":{ + "s3Location":{ + "shape":"S3Location", + "documentation":"

An S3 bucket path.

" + } + }, + "documentation":"

A location for storing content from data sources temporarily as it is processed by custom components in the ingestion pipeline.

" + }, "InternalServerException":{ "type":"structure", "members":{ @@ -2893,6 +5083,12 @@ "exception":true, "fault":true }, + "IteratorFlowNodeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains configurations for an iterator node in a flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.

The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.

" + }, "KmsKeyArn":{ "type":"string", "max":2048, @@ -2974,12 +5170,33 @@ "shape":"KnowledgeBaseType", "documentation":"

The type of data that the data source is converted into for the knowledge base.

" }, - "vectorKnowledgeBaseConfiguration":{ - "shape":"VectorKnowledgeBaseConfiguration", - "documentation":"

Contains details about the embeddings model that's used to convert the data source.

" + "vectorKnowledgeBaseConfiguration":{ + "shape":"VectorKnowledgeBaseConfiguration", + "documentation":"

Contains details about the embeddings model that's used to convert the data source.

" + } + }, + "documentation":"

Contains details about the embeddings configuration of the knowledge base.

" + }, + "KnowledgeBaseFlowNodeConfiguration":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"KnowledgeBaseId", + "documentation":"

The unique identifier of the knowledge base to query.

" + }, + "modelId":{ + "shape":"ModelIdentifier", + "documentation":"

The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

" } }, - "documentation":"

Contains details about the embeddings configuration of the knowledge base.

" + "documentation":"

Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + }, + "KnowledgeBaseId":{ + "type":"string", + "max":10, + "min":0, + "pattern":"^[0-9a-zA-Z]+$" }, "KnowledgeBaseRoleArn":{ "type":"string", @@ -3061,6 +5278,46 @@ "min":0, "pattern":"^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" }, + "LambdaFunctionFlowNodeConfiguration":{ + "type":"structure", + "required":["lambdaArn"], + "members":{ + "lambdaArn":{ + "shape":"LambdaArn", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function to invoke.

" + } + }, + "documentation":"

Contains configurations for a Lambda function node in the flow. You specify the Lambda function to invoke and the inputs into the function. The output is the response that is defined in the Lambda function. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + }, + "LexBotAliasArn":{ + "type":"string", + "max":78, + "min":0, + "pattern":"^arn:aws(|-us-gov):lex:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:bot-alias/[0-9a-zA-Z]+/[0-9a-zA-Z]+$" + }, + "LexBotLocaleId":{ + "type":"string", + "max":10, + "min":1 + }, + "LexFlowNodeConfiguration":{ + "type":"structure", + "required":[ + "botAliasArn", + "localeId" + ], + "members":{ + "botAliasArn":{ + "shape":"LexBotAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.

" + }, + "localeId":{ + "shape":"LexBotLocaleId", + "documentation":"

The Region to invoke the Amazon Lex bot in.

" + } + }, + "documentation":"

Contains configurations for a Lex node in the flow. You specify an Amazon Lex bot to invoke. This node takes an utterance as the input and returns as the output the intent identified by the Amazon Lex bot. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + }, "ListAgentActionGroupsRequest":{ "type":"structure", "required":[ @@ -3276,6 +5533,113 @@ } } }, + "ListFlowAliasesRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow for which aliases are being returned.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListFlowAliasesResponse":{ + "type":"structure", + "required":["flowAliasSummaries"], + "members":{ + "flowAliasSummaries":{ + "shape":"FlowAliasSummaries", + "documentation":"

A list, each member of which contains information about a flow alias.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "ListFlowVersionsRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListFlowVersionsResponse":{ + "type":"structure", + "required":["flowVersionSummaries"], + "members":{ + "flowVersionSummaries":{ + "shape":"FlowVersionSummaries", + "documentation":"

A list, each member of which contains information about a flow.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "ListFlowsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListFlowsResponse":{ + "type":"structure", + "required":["flowSummaries"], + "members":{ + "flowSummaries":{ + "shape":"FlowSummaries", + "documentation":"

A list, each member of which contains information about a flow.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, "ListIngestionJobsRequest":{ "type":"structure", "required":[ @@ -3354,6 +5718,43 @@ } } }, + "ListPromptsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "promptIdentifier":{ + "shape":"PromptIdentifier", + "documentation":"

The unique identifier of the prompt.

", + "location":"querystring", + "locationName":"promptIdentifier" + } + } + }, + "ListPromptsResponse":{ + "type":"structure", + "required":["promptSummaries"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + }, + "promptSummaries":{ + "shape":"PromptSummaries", + "documentation":"

A list, each member of which contains information about a prompt using Prompt management.

" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -3387,6 +5788,31 @@ "max":4096, "min":0 }, + "MemoryConfiguration":{ + "type":"structure", + "required":["enabledMemoryTypes"], + "members":{ + "enabledMemoryTypes":{ + "shape":"EnabledMemoryTypes", + "documentation":"

The type of memory that is stored.

" + }, + "storageDays":{ + "shape":"StorageDays", + "documentation":"

The number of days the agent is configured to retain the conversational context.

" + } + }, + "documentation":"

Details of the memory configuration.

" + }, + "MemoryType":{ + "type":"string", + "enum":["SESSION_SUMMARY"] + }, + "Microsoft365TenantId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, "ModelIdentifier":{ "type":"string", "max":2048, @@ -3564,6 +5990,12 @@ "min":0, "pattern":"^.*$" }, + "OutputFlowNodeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains configurations for an output flow node in the flow. You specify the data type expected for the input into the node in the type field and how to return the final output in the expression field.

" + }, "ParameterDescription":{ "type":"string", "max":500, @@ -3593,6 +6025,78 @@ "key":{"shape":"Name"}, "value":{"shape":"ParameterDetail"} }, + "ParsingConfiguration":{ + "type":"structure", + "required":["parsingStrategy"], + "members":{ + "bedrockFoundationModelConfiguration":{ + "shape":"BedrockFoundationModelConfiguration", + "documentation":"

Settings for a foundation model used to parse documents for a data source.

" + }, + "parsingStrategy":{ + "shape":"ParsingStrategy", + "documentation":"

The parsing strategy for the data source.

" + } + }, + "documentation":"

Settings for parsing document contents. By default, the service converts the contents of each document into text before splitting it into chunks. To improve processing of PDF files with tables and images, you can configure the data source to convert the pages of text into images and use a model to describe the contents of each page.

To use a model to parse PDF documents, set the parsing strategy to BEDROCK_FOUNDATION_MODEL and specify the model to use by ARN. You can also override the default parsing prompt with instructions for how to interpret images and tables in your documents. The following models are supported.

  • Anthropic Claude 3 Sonnet - anthropic.claude-3-sonnet-20240229-v1:0

  • Anthropic Claude 3 Haiku - anthropic.claude-3-haiku-20240307-v1:0

You can get the ARN of a model with the action. Standard model usage charges apply for the foundation model parsing strategy.

" + }, + "ParsingPrompt":{ + "type":"structure", + "required":["parsingPromptText"], + "members":{ + "parsingPromptText":{ + "shape":"ParsingPromptText", + "documentation":"

Instructions for interpreting the contents of a document.

" + } + }, + "documentation":"

Instructions for interpreting the contents of a document.

" + }, + "ParsingPromptText":{ + "type":"string", + "max":10000, + "min":1 + }, + "ParsingStrategy":{ + "type":"string", + "enum":["BEDROCK_FOUNDATION_MODEL"] + }, + "PatternObjectFilter":{ + "type":"structure", + "required":["objectType"], + "members":{ + "exclusionFilters":{ + "shape":"FilterList", + "documentation":"

A list of one or more exclusion regular expression patterns to exclude certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn’t crawled.

" + }, + "inclusionFilters":{ + "shape":"FilterList", + "documentation":"

A list of one or more inclusion regular expression patterns to include certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn’t crawled.

" + }, + "objectType":{ + "shape":"FilteredObjectType", + "documentation":"

The supported object type or content type of the data source.

" + } + }, + "documentation":"

The specific filters applied to your data source content. You can filter out or include certain content.

" + }, + "PatternObjectFilterConfiguration":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"PatternObjectFilterList", + "documentation":"

The configuration of specific filters applied to your data source content. You can filter out or include certain content.

" + } + }, + "documentation":"

The configuration of filtering certain objects or content types of the data source.

" + }, + "PatternObjectFilterList":{ + "type":"list", + "member":{"shape":"PatternObjectFilter"}, + "max":25, + "min":1, + "sensitive":true + }, "Payload":{ "type":"string", "sensitive":true @@ -3691,44 +6195,222 @@ "shape":"DateTimestamp", "documentation":"

The time at which the DRAFT version of the agent was last prepared.

" } - } + } + }, + "PrepareFlowRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "PrepareFlowResponse":{ + "type":"structure", + "required":[ + "id", + "status" + ], + "members":{ + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow. When you submit this request, the status will be NotPrepared. If preparation succeeds, the status becomes Prepared. If it fails, the status becomes FAILED.

" + } + } + }, + "PrimitiveLong":{"type":"long"}, + "PromptArn":{ + "type":"string", + "pattern":"^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?)$" + }, + "PromptConfiguration":{ + "type":"structure", + "members":{ + "basePromptTemplate":{ + "shape":"BasePromptTemplate", + "documentation":"

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. For more information, see Configure the prompt templates.

" + }, + "inferenceConfiguration":{ + "shape":"InferenceConfiguration", + "documentation":"

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.

" + }, + "parserMode":{ + "shape":"CreationMode", + "documentation":"

Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function.

" + }, + "promptCreationMode":{ + "shape":"CreationMode", + "documentation":"

Specifies whether to override the default prompt template for this promptType. Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template.

" + }, + "promptState":{ + "shape":"PromptState", + "documentation":"

Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

  • PRE_PROCESSINGENABLED

  • ORCHESTRATIONENABLED

  • KNOWLEDGE_BASE_RESPONSE_GENERATIONENABLED

  • POST_PROCESSINGDISABLED

" + }, + "promptType":{ + "shape":"PromptType", + "documentation":"

The step in the agent sequence that this prompt configuration applies to.

" + } + }, + "documentation":"

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.

" + }, + "PromptConfigurations":{ + "type":"list", + "member":{"shape":"PromptConfiguration"}, + "max":10, + "min":0 + }, + "PromptDescription":{ + "type":"string", + "max":200, + "min":1 + }, + "PromptFlowNodeConfiguration":{ + "type":"structure", + "required":["sourceConfiguration"], + "members":{ + "sourceConfiguration":{ + "shape":"PromptFlowNodeSourceConfiguration", + "documentation":"

Specifies whether the prompt is from Prompt management or defined inline.

" + } + }, + "documentation":"

Contains configurations for a prompt node in the flow. You can use a prompt from Prompt management or you can define one in this node. If the prompt contains variables, the inputs into this node will fill in the variables. The output from this node is the response generated by the model. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + }, + "PromptFlowNodeInlineConfiguration":{ + "type":"structure", + "required":[ + "modelId", + "templateConfiguration", + "templateType" + ], + "members":{ + "inferenceConfiguration":{ + "shape":"PromptInferenceConfiguration", + "documentation":"

Contains inference configurations for the prompt.

" + }, + "modelId":{ + "shape":"PromptModelIdentifier", + "documentation":"

The unique identifier of the model to run inference with.

" + }, + "templateConfiguration":{ + "shape":"PromptTemplateConfiguration", + "documentation":"

Contains a prompt and variables in the prompt that can be replaced with values at runtime.

" + }, + "templateType":{ + "shape":"PromptTemplateType", + "documentation":"

The type of prompt template.

" + } + }, + "documentation":"

Contains configurations for a prompt defined inline in the node.

" + }, + "PromptFlowNodeResourceConfiguration":{ + "type":"structure", + "required":["promptArn"], + "members":{ + "promptArn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the prompt from Prompt management.

" + } + }, + "documentation":"

Contains configurations for a prompt from Prompt management to use in a node.

" + }, + "PromptFlowNodeSourceConfiguration":{ + "type":"structure", + "members":{ + "inline":{ + "shape":"PromptFlowNodeInlineConfiguration", + "documentation":"

Contains configurations for a prompt that is defined inline.

" + }, + "resource":{ + "shape":"PromptFlowNodeResourceConfiguration", + "documentation":"

Contains configurations for a prompt from Prompt management.

" + } + }, + "documentation":"

Contains configurations for a prompt and whether it is from Prompt management or defined inline.

", + "union":true + }, + "PromptId":{ + "type":"string", + "pattern":"^[0-9a-zA-Z]{10}$" + }, + "PromptIdentifier":{ + "type":"string", + "pattern":"^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$" + }, + "PromptInferenceConfiguration":{ + "type":"structure", + "members":{ + "text":{ + "shape":"PromptModelInferenceConfiguration", + "documentation":"

Contains inference configurations for a text prompt.

" + } + }, + "documentation":"

Contains inference configurations for the prompt.

", + "union":true }, - "PrimitiveLong":{"type":"long"}, - "PromptConfiguration":{ + "PromptInputVariable":{ "type":"structure", "members":{ - "basePromptTemplate":{ - "shape":"BasePromptTemplate", - "documentation":"

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables.

" - }, - "inferenceConfiguration":{ - "shape":"InferenceConfiguration", - "documentation":"

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.

" + "name":{ + "shape":"PromptInputVariableName", + "documentation":"

The name of the variable.

" + } + }, + "documentation":"

Contains information about a variable in the prompt.

" + }, + "PromptInputVariableName":{ + "type":"string", + "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" + }, + "PromptInputVariablesList":{ + "type":"list", + "member":{"shape":"PromptInputVariable"}, + "max":5, + "min":0, + "sensitive":true + }, + "PromptModelIdentifier":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + }, + "PromptModelInferenceConfiguration":{ + "type":"structure", + "members":{ + "maxTokens":{ + "shape":"MaximumLength", + "documentation":"

The maximum number of tokens to return in the response.

" }, - "parserMode":{ - "shape":"CreationMode", - "documentation":"

Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function.

" + "stopSequences":{ + "shape":"StopSequences", + "documentation":"

A list of strings that define sequences after which the model will stop generating.

" }, - "promptCreationMode":{ - "shape":"CreationMode", - "documentation":"

Specifies whether to override the default prompt template for this promptType. Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template.

" + "temperature":{ + "shape":"Temperature", + "documentation":"

Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.

" }, - "promptState":{ - "shape":"PromptState", - "documentation":"

Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

  • PRE_PROCESSINGENABLED

  • ORCHESTRATIONENABLED

  • KNOWLEDGE_BASE_RESPONSE_GENERATIONENABLED

  • POST_PROCESSINGDISABLED

" + "topK":{ + "shape":"TopK", + "documentation":"

The number of most-likely candidates that the model considers for the next token during generation.

" }, - "promptType":{ - "shape":"PromptType", - "documentation":"

The step in the agent sequence that this prompt configuration applies to.

" + "topP":{ + "shape":"TopP", + "documentation":"

The percentage of most-likely candidates that the model considers for the next token.

" } }, - "documentation":"

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.

" + "documentation":"

Contains inference configurations related to model inference for a prompt. For more information, see Inference parameters.

" }, - "PromptConfigurations":{ - "type":"list", - "member":{"shape":"PromptConfiguration"}, - "max":10, - "min":0 + "PromptName":{ + "type":"string", + "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" }, "PromptOverrideConfiguration":{ "type":"structure", @@ -3736,7 +6418,7 @@ "members":{ "overrideLambda":{ "shape":"LambdaArn", - "documentation":"

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN.

" + "documentation":"

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

" }, "promptConfigurations":{ "shape":"PromptConfigurations", @@ -3753,6 +6435,69 @@ "DISABLED" ] }, + "PromptSummaries":{ + "type":"list", + "member":{"shape":"PromptSummary"}, + "max":10, + "min":0 + }, + "PromptSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "id", + "name", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the prompt.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was created.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

The description of the prompt.

" + }, + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

The name of the prompt.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was last updated.

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the prompt that this summary applies to.

" + } + }, + "documentation":"

Contains information about a prompt in your Prompt management tool.

This data type is used in the following API operations:

" + }, + "PromptTemplateConfiguration":{ + "type":"structure", + "members":{ + "text":{ + "shape":"TextPromptTemplateConfiguration", + "documentation":"

Contains configurations for the text in a message for a prompt.

" + } + }, + "documentation":"

Contains the message for a prompt. For more information, see Prompt management in Amazon Bedrock.

", + "union":true + }, + "PromptTemplateType":{ + "type":"string", + "enum":["TEXT"] + }, "PromptType":{ "type":"string", "enum":[ @@ -3762,6 +6507,48 @@ "KNOWLEDGE_BASE_RESPONSE_GENERATION" ] }, + "PromptVariant":{ + "type":"structure", + "required":[ + "name", + "templateType" + ], + "members":{ + "inferenceConfiguration":{ + "shape":"PromptInferenceConfiguration", + "documentation":"

Contains inference configurations for the prompt variant.

" + }, + "modelId":{ + "shape":"PromptModelIdentifier", + "documentation":"

The unique identifier of the model with which to run inference on the prompt.

" + }, + "name":{ + "shape":"PromptVariantName", + "documentation":"

The name of the prompt variant.

" + }, + "templateConfiguration":{ + "shape":"PromptTemplateConfiguration", + "documentation":"

Contains configurations for the prompt template.

" + }, + "templateType":{ + "shape":"PromptTemplateType", + "documentation":"

The type of prompt template to use.

" + } + }, + "documentation":"

Contains details about a variant of the prompt.

", + "sensitive":true + }, + "PromptVariantList":{ + "type":"list", + "member":{"shape":"PromptVariant"}, + "max":3, + "min":0, + "sensitive":true + }, + "PromptVariantName":{ + "type":"string", + "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" + }, "ProvisionedModelIdentifier":{ "type":"string", "max":2048, @@ -3931,6 +6718,39 @@ }, "exception":true }, + "RetrievalFlowNodeConfiguration":{ + "type":"structure", + "required":["serviceConfiguration"], + "members":{ + "serviceConfiguration":{ + "shape":"RetrievalFlowNodeServiceConfiguration", + "documentation":"

Contains configurations for the service to use for retrieving data to return as the output from the node.

" + } + }, + "documentation":"

Contains configurations for a Retrieval node in a flow. This node retrieves data from the Amazon S3 location that you specify and returns it as the output.

" + }, + "RetrievalFlowNodeS3Configuration":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket from which to retrieve data.

" + } + }, + "documentation":"

Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node.

" + }, + "RetrievalFlowNodeServiceConfiguration":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"RetrievalFlowNodeS3Configuration", + "documentation":"

Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node.

" + } + }, + "documentation":"

Contains configurations for the service to use for retrieving data to return as the output from the node.

", + "union":true + }, "S3BucketArn":{ "type":"string", "max":2048, @@ -3943,24 +6763,30 @@ "min":3, "pattern":"^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" }, + "S3BucketUri":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^s3://.{1,128}$" + }, "S3DataSourceConfiguration":{ "type":"structure", "required":["bucketArn"], "members":{ "bucketArn":{ "shape":"S3BucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the bucket that contains the data source.

" + "documentation":"

The Amazon Resource Name (ARN) of the S3 bucket that contains your data.

" }, "bucketOwnerAccountId":{ "shape":"BucketOwnerAccountId", - "documentation":"

The bucket account owner ID for the S3 bucket.

" + "documentation":"

The account ID for the owner of the S3 bucket.

" }, "inclusionPrefixes":{ "shape":"S3Prefixes", - "documentation":"

A list of S3 prefixes that define the object containing the data sources. For more information, see Organizing objects using prefixes.

" + "documentation":"

A list of S3 prefixes to include certain files or content. For more information, see Organizing objects using prefixes.

" } }, - "documentation":"

Contains information about the S3 configuration of the data source.

" + "documentation":"

The configuration information to connect to Amazon S3 as your data source.

" }, "S3Identifier":{ "type":"structure", @@ -3971,10 +6797,21 @@ }, "s3ObjectKey":{ "shape":"S3ObjectKey", - "documentation":"

The S3 object key containing the resource.

" + "documentation":"

The S3 object key for the S3 resource.

" + } + }, + "documentation":"

The identifier information for an Amazon S3 bucket.

" + }, + "S3Location":{ + "type":"structure", + "required":["uri"], + "members":{ + "uri":{ + "shape":"S3BucketUri", + "documentation":"

The location's URI. For example, s3://my-bucket/chunk-processor/.

" } }, - "documentation":"

Contains information about the S3 object containing the resource.

" + "documentation":"

An Amazon S3 location.

" }, "S3ObjectKey":{ "type":"string", @@ -3985,7 +6822,8 @@ "S3Prefix":{ "type":"string", "max":300, - "min":1 + "min":1, + "sensitive":true }, "S3Prefixes":{ "type":"list", @@ -3993,10 +6831,118 @@ "max":1, "min":1 }, + "SalesforceAuthType":{ + "type":"string", + "enum":["OAUTH2_CLIENT_CREDENTIALS"] + }, + "SalesforceCrawlerConfiguration":{ + "type":"structure", + "members":{ + "filterConfiguration":{ + "shape":"CrawlFilterConfiguration", + "documentation":"

The configuration of filtering the Salesforce content. For example, configuring regular expression patterns to include or exclude certain content.

" + } + }, + "documentation":"

The configuration of the Salesforce content. For example, configuring specific types of Salesforce content.

" + }, + "SalesforceDataSourceConfiguration":{ + "type":"structure", + "required":["sourceConfiguration"], + "members":{ + "crawlerConfiguration":{ + "shape":"SalesforceCrawlerConfiguration", + "documentation":"

The configuration of the Salesforce content. For example, configuring specific types of Salesforce content.

" + }, + "sourceConfiguration":{ + "shape":"SalesforceSourceConfiguration", + "documentation":"

The endpoint information to connect to your Salesforce data source.

" + } + }, + "documentation":"

The configuration information to connect to Salesforce as your data source.

" + }, + "SalesforceSourceConfiguration":{ + "type":"structure", + "required":[ + "authType", + "credentialsSecretArn", + "hostUrl" + ], + "members":{ + "authType":{ + "shape":"SalesforceAuthType", + "documentation":"

The supported authentication type to authenticate and connect to your Salesforce instance.

" + }, + "credentialsSecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name of a Secrets Manager secret that stores your authentication credentials for your Salesforce instance. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see Salesforce connection configuration.

" + }, + "hostUrl":{ + "shape":"HttpsUrl", + "documentation":"

The Salesforce host URL or instance URL.

" + } + }, + "documentation":"

The endpoint information to connect to your Salesforce data source.

" + }, "SecretArn":{ "type":"string", "pattern":"^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$" }, + "SeedUrl":{ + "type":"structure", + "members":{ + "url":{ + "shape":"Url", + "documentation":"

A seed or starting point URL.

" + } + }, + "documentation":"

The seed or starting point URL. You should be authorized to crawl the URL.

" + }, + "SeedUrls":{ + "type":"list", + "member":{"shape":"SeedUrl"}, + "max":100, + "min":1 + }, + "SemanticChunkingConfiguration":{ + "type":"structure", + "required":[ + "breakpointPercentileThreshold", + "bufferSize", + "maxTokens" + ], + "members":{ + "breakpointPercentileThreshold":{ + "shape":"SemanticChunkingConfigurationBreakpointPercentileThresholdInteger", + "documentation":"

The dissimilarity threshold for splitting chunks.

" + }, + "bufferSize":{ + "shape":"SemanticChunkingConfigurationBufferSizeInteger", + "documentation":"

The buffer size.

" + }, + "maxTokens":{ + "shape":"SemanticChunkingConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens that a chunk can contain.

" + } + }, + "documentation":"

Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing.

With semantic chunking, each sentence is compared to the next to determine how similar they are. You specify a threshold in the form of a percentile, where adjacent sentences that are less similar than that percentage of sentence pairs are divided into separate chunks. For example, if you set the threshold to 90, then the 10 percent of sentence pairs that are least similar are split. So if you have 101 sentences, 100 sentence pairs are compared, and the 10 with the least similarity are split, creating 11 chunks. These chunks are further split if they exceed the max token size.

You must also specify a buffer size, which determines whether sentences are compared in isolation, or within a moving context window that includes the previous and following sentence. For example, if you set the buffer size to 1, the embedding for sentence 10 is derived from sentences 9, 10, and 11 combined.

" + }, + "SemanticChunkingConfigurationBreakpointPercentileThresholdInteger":{ + "type":"integer", + "box":true, + "max":99, + "min":50 + }, + "SemanticChunkingConfigurationBufferSizeInteger":{ + "type":"integer", + "box":true, + "max":1, + "min":0 + }, + "SemanticChunkingConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, "ServerSideEncryptionConfiguration":{ "type":"structure", "members":{ @@ -4017,13 +6963,94 @@ "httpStatusCode":402, "senderFault":true }, - "exception":true - }, - "SessionTTL":{ - "type":"integer", - "box":true, - "max":3600, - "min":60 + "exception":true + }, + "SessionTTL":{ + "type":"integer", + "box":true, + "max":3600, + "min":60 + }, + "SharePointAuthType":{ + "type":"string", + "enum":["OAUTH2_CLIENT_CREDENTIALS"] + }, + "SharePointCrawlerConfiguration":{ + "type":"structure", + "members":{ + "filterConfiguration":{ + "shape":"CrawlFilterConfiguration", + "documentation":"

The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content.

" + } + }, + "documentation":"

The configuration of the SharePoint content. For example, configuring specific types of SharePoint content.

" + }, + "SharePointDataSourceConfiguration":{ + "type":"structure", + "required":["sourceConfiguration"], + "members":{ + "crawlerConfiguration":{ + "shape":"SharePointCrawlerConfiguration", + "documentation":"

The configuration of the SharePoint content. For example, configuring specific types of SharePoint content.

" + }, + "sourceConfiguration":{ + "shape":"SharePointSourceConfiguration", + "documentation":"

The endpoint information to connect to your SharePoint data source.

" + } + }, + "documentation":"

The configuration information to connect to SharePoint as your data source.

" + }, + "SharePointDomain":{ + "type":"string", + "max":50, + "min":1 + }, + "SharePointHostType":{ + "type":"string", + "enum":["ONLINE"] + }, + "SharePointSiteUrls":{ + "type":"list", + "member":{"shape":"HttpsUrl"}, + "max":100, + "min":1 + }, + "SharePointSourceConfiguration":{ + "type":"structure", + "required":[ + "authType", + "credentialsSecretArn", + "domain", + "hostType", + "siteUrls" + ], + "members":{ + "authType":{ + "shape":"SharePointAuthType", + "documentation":"

The supported authentication type to authenticate and connect to your SharePoint site/sites.

" + }, + "credentialsSecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name of a Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see SharePoint connection configuration.

" + }, + "domain":{ + "shape":"SharePointDomain", + "documentation":"

The domain of your SharePoint instance or site URL/URLs.

" + }, + "hostType":{ + "shape":"SharePointHostType", + "documentation":"

The supported host type, whether online/cloud or server/on-premises.

" + }, + "siteUrls":{ + "shape":"SharePointSiteUrls", + "documentation":"

A list of one or more SharePoint site URLs.

" + }, + "tenantId":{ + "shape":"Microsoft365TenantId", + "documentation":"

The identifier of your Microsoft 365 tenant.

" + } + }, + "documentation":"

The endpoint information to connect to your SharePoint data source.

" }, "SortOrder":{ "type":"string", @@ -4072,6 +7099,10 @@ } } }, + "StepType":{ + "type":"string", + "enum":["POST_CHUNKING"] + }, "StopSequences":{ "type":"list", "member":{"shape":"String"}, @@ -4109,6 +7140,45 @@ }, "documentation":"

Contains the storage configuration of the knowledge base.

" }, + "StorageDays":{ + "type":"integer", + "box":true, + "max":30, + "min":0 + }, + "StorageFlowNodeConfiguration":{ + "type":"structure", + "required":["serviceConfiguration"], + "members":{ + "serviceConfiguration":{ + "shape":"StorageFlowNodeServiceConfiguration", + "documentation":"

Contains configurations for the service to use for storing the input into the node.

" + } + }, + "documentation":"

Contains configurations for a Storage node in a flow. This node stores the input in an Amazon S3 location that you specify.

" + }, + "StorageFlowNodeS3Configuration":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket in which to store the input into the node.

" + } + }, + "documentation":"

Contains configurations for the Amazon S3 location in which to store the input into the node.

" + }, + "StorageFlowNodeServiceConfiguration":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"StorageFlowNodeS3Configuration", + "documentation":"

Contains configurations for the Amazon S3 location in which to store the input into the node.

" + } + }, + "documentation":"

Contains configurations for the service to use for storing the input into the node.

", + "union":true + }, "String":{"type":"string"}, "TagKey":{ "type":"string", @@ -4156,7 +7226,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$)" + "pattern":"(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base|flow|prompt)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:flow/([A-Z0-9]{10})/alias/([A-Z0-9]{10})$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:prompt/([A-Z0-9]{10})?(?::/d+)?$)" }, "TagsMap":{ "type":"map", @@ -4169,6 +7239,28 @@ "max":1, "min":0 }, + "TextPrompt":{ + "type":"string", + "max":200000, + "min":1, + "sensitive":true + }, + "TextPromptTemplateConfiguration":{ + "type":"structure", + "required":["text"], + "members":{ + "inputVariables":{ + "shape":"PromptInputVariablesList", + "documentation":"

An array of the variables in the prompt template.

" + }, + "text":{ + "shape":"TextPrompt", + "documentation":"

The message for the prompt.

" + } + }, + "documentation":"

Contains configurations for a text prompt template. To include a variable, enclose a word in double curly braces as in {{variable}}.

", + "sensitive":true + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -4193,6 +7285,52 @@ "max":1, "min":0 }, + "Transformation":{ + "type":"structure", + "required":[ + "stepToApply", + "transformationFunction" + ], + "members":{ + "stepToApply":{ + "shape":"StepType", + "documentation":"

When the service applies the transformation.

" + }, + "transformationFunction":{ + "shape":"TransformationFunction", + "documentation":"

A Lambda function that processes documents.

" + } + }, + "documentation":"

A custom processing step for documents moving through a data source ingestion pipeline. To process documents after they have been converted into chunks, set the step to apply to POST_CHUNKING.

" + }, + "TransformationFunction":{ + "type":"structure", + "required":["transformationLambdaConfiguration"], + "members":{ + "transformationLambdaConfiguration":{ + "shape":"TransformationLambdaConfiguration", + "documentation":"

The Lambda function.

" + } + }, + "documentation":"

A Lambda function that processes documents.

" + }, + "TransformationLambdaConfiguration":{ + "type":"structure", + "required":["lambdaArn"], + "members":{ + "lambdaArn":{ + "shape":"LambdaArn", + "documentation":"

The function's ARN identifier.

" + } + }, + "documentation":"

A Lambda function that processes documents.

" + }, + "Transformations":{ + "type":"list", + "member":{"shape":"Transformation"}, + "max":1, + "min":1 + }, "Type":{ "type":"string", "enum":[ @@ -4421,6 +7559,10 @@ "shape":"ModelIdentifier", "documentation":"

Specifies a new foundation model to be used for orchestration by the agent.

" }, + "guardrailConfiguration":{ + "shape":"GuardrailConfiguration", + "documentation":"

The unique Guardrail configuration assigned to the agent when it is updated.

" + }, "idleSessionTTLInSeconds":{ "shape":"SessionTTL", "documentation":"

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" @@ -4429,6 +7571,10 @@ "shape":"Instruction", "documentation":"

Specifies new instructions that tell the agent what it should do and how it should interact with users.

" }, + "memoryConfiguration":{ + "shape":"MemoryConfiguration", + "documentation":"

Specifies the new memory configuration for the agent.

" + }, "promptOverrideConfiguration":{ "shape":"PromptOverrideConfiguration", "documentation":"

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" @@ -4456,11 +7602,11 @@ "members":{ "dataDeletionPolicy":{ "shape":"DataDeletionPolicy", - "documentation":"

The data deletion policy of the updated data source.

" + "documentation":"

The data deletion policy for the data source that you want to update.

" }, "dataSourceConfiguration":{ "shape":"DataSourceConfiguration", - "documentation":"

Contains details about the storage configuration of the data source.

" + "documentation":"

The connection configuration for the data source that you want to update.

" }, "dataSourceId":{ "shape":"Id", @@ -4474,7 +7620,7 @@ }, "knowledgeBaseId":{ "shape":"Id", - "documentation":"

The unique identifier of the knowledge base to which the data source belongs.

", + "documentation":"

The unique identifier of the knowledge base for the data source.

", "location":"uri", "locationName":"knowledgeBaseId" }, @@ -4502,6 +7648,182 @@ } } }, + "UpdateFlowAliasRequest":{ + "type":"structure", + "required":[ + "aliasIdentifier", + "flowIdentifier", + "name", + "routingConfiguration" + ], + "members":{ + "aliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

The unique identifier of the alias.

", + "location":"uri", + "locationName":"aliasIdentifier" + }, + "description":{ + "shape":"Description", + "documentation":"

A description for the flow alias.

" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the flow alias.

" + }, + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

Contains information about the version to which to map the alias.

" + } + } + }, + "UpdateFlowAliasResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "flowId", + "id", + "name", + "routingConfiguration", + "updatedAt" + ], + "members":{ + "arn":{ + "shape":"FlowAliasArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the flow.

" + }, + "flowId":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "id":{ + "shape":"FlowAliasId", + "documentation":"

The unique identifier of the alias.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the flow alias.

" + }, + "routingConfiguration":{ + "shape":"FlowAliasRoutingConfiguration", + "documentation":"

Contains information about the version that the alias is mapped to.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow alias was last updated.

" + } + } + }, + "UpdateFlowRequest":{ + "type":"structure", + "required":[ + "executionRoleArn", + "flowIdentifier", + "name" + ], + "members":{ + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the flow.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

A definition of the nodes and the connections between the nodes in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

A description for the flow.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

The unique identifier of the flow.

", + "location":"uri", + "locationName":"flowIdentifier" + }, + "name":{ + "shape":"FlowName", + "documentation":"

A name for the flow.

" + } + } + }, + "UpdateFlowResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "executionRoleArn", + "id", + "name", + "status", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"FlowArn", + "documentation":"

The Amazon Resource Name (ARN) of the flow.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that the flow was encrypted with.

" + }, + "definition":{ + "shape":"FlowDefinition", + "documentation":"

A definition of the nodes and the connections between nodes in the flow.

" + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

The description of the flow.

" + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

" + }, + "id":{ + "shape":"FlowId", + "documentation":"

The unique identifier of the flow.

" + }, + "name":{ + "shape":"FlowName", + "documentation":"

The name of the flow.

" + }, + "status":{ + "shape":"FlowStatus", + "documentation":"

The status of the flow. When you submit this request, the status will be NotPrepared. If updating fails, the status becomes Failed.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the flow was last updated.

" + }, + "version":{ + "shape":"DraftVersion", + "documentation":"

The version of the flow. When you update a flow, the version updated is the DRAFT version.

" + } + } + }, "UpdateKnowledgeBaseRequest":{ "type":"structure", "required":[ @@ -4550,6 +7872,108 @@ } } }, + "UpdatePromptRequest":{ + "type":"structure", + "required":[ + "name", + "promptIdentifier" + ], + "members":{ + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" + }, + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

A description for the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

A name for the prompt.

" + }, + "promptIdentifier":{ + "shape":"PromptIdentifier", + "documentation":"

The unique identifier of the prompt.

", + "location":"uri", + "locationName":"promptIdentifier" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" + } + } + }, + "UpdatePromptResponse":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "id", + "name", + "updatedAt", + "version" + ], + "members":{ + "arn":{ + "shape":"PromptArn", + "documentation":"

The Amazon Resource Name (ARN) of the prompt.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was created.

" + }, + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" + }, + "defaultVariant":{ + "shape":"PromptVariantName", + "documentation":"

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + }, + "description":{ + "shape":"PromptDescription", + "documentation":"

The description of the prompt.

" + }, + "id":{ + "shape":"PromptId", + "documentation":"

The unique identifier of the prompt.

" + }, + "name":{ + "shape":"PromptName", + "documentation":"

The name of the prompt.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The time at which the prompt was last updated.

" + }, + "variants":{ + "shape":"PromptVariantList", + "documentation":"

A list of objects, each containing details about a variant of the prompt.

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the prompt. When you update a prompt, the version updated is the DRAFT version.

" + } + } + }, + "Url":{ + "type":"string", + "pattern":"^https?://[A-Za-z0-9][^\\s]*$" + }, + "UrlConfiguration":{ + "type":"structure", + "members":{ + "seedUrls":{ + "shape":"SeedUrls", + "documentation":"

One or more seed or starting point URLs.

" + } + }, + "documentation":"

The configuration of web URLs that you want to crawl. You should be authorized to crawl the URLs.

" + }, "ValidationException":{ "type":"structure", "members":{ @@ -4594,6 +8018,14 @@ "chunkingConfiguration":{ "shape":"ChunkingConfiguration", "documentation":"

Details about how to chunk the documents in the data source. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.

" + }, + "customTransformationConfiguration":{ + "shape":"CustomTransformationConfiguration", + "documentation":"

A custom document transformer for parsed data source documents.

" + }, + "parsingConfiguration":{ + "shape":"ParsingConfiguration", + "documentation":"

A custom parser for data source documents.

" } }, "documentation":"

Contains details about how to ingest the documents in a data source.

" @@ -4605,6 +8037,10 @@ "embeddingModelArn":{ "shape":"BedrockEmbeddingModelArn", "documentation":"

The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.

" + }, + "embeddingModelConfiguration":{ + "shape":"EmbeddingModelConfiguration", + "documentation":"

The embeddings model configuration details for the vector model used in Knowledge Base.

" } }, "documentation":"

Contains details about the model used to create vector embeddings for the knowledge base.

" @@ -4614,6 +8050,77 @@ "max":5, "min":1, "pattern":"^(DRAFT|[0-9]{0,4}[1-9][0-9]{0,4})$" + }, + "WebCrawlerConfiguration":{ + "type":"structure", + "members":{ + "crawlerLimits":{ + "shape":"WebCrawlerLimits", + "documentation":"

The configuration of crawl limits for the web URLs.

" + }, + "exclusionFilters":{ + "shape":"FilterList", + "documentation":"

A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.

" + }, + "inclusionFilters":{ + "shape":"FilterList", + "documentation":"

A list of one or more inclusion regular expression patterns to include certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.

" + }, + "scope":{ + "shape":"WebScopeType", + "documentation":"

The scope of what is crawled for your URLs.

You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain \"aws.amazon.com\" can also include sub domain \"docs.aws.amazon.com\".

" + } + }, + "documentation":"

The configuration of web URLs that you want to crawl. You should be authorized to crawl the URLs.

" + }, + "WebCrawlerLimits":{ + "type":"structure", + "members":{ + "rateLimit":{ + "shape":"WebCrawlerLimitsRateLimitInteger", + "documentation":"

The max rate at which pages are crawled, up to 300 per minute per host.

" + } + }, + "documentation":"

The rate limits for the URLs that you want to crawl. You should be authorized to crawl the URLs.

" + }, + "WebCrawlerLimitsRateLimitInteger":{ + "type":"integer", + "box":true, + "max":300, + "min":1 + }, + "WebDataSourceConfiguration":{ + "type":"structure", + "required":["sourceConfiguration"], + "members":{ + "crawlerConfiguration":{ + "shape":"WebCrawlerConfiguration", + "documentation":"

The Web Crawler configuration details for the web data source.

" + }, + "sourceConfiguration":{ + "shape":"WebSourceConfiguration", + "documentation":"

The source configuration details for the web data source.

" + } + }, + "documentation":"

The configuration details for the web data source.

" + }, + "WebScopeType":{ + "type":"string", + "enum":[ + "HOST_ONLY", + "SUBDOMAINS" + ] + }, + "WebSourceConfiguration":{ + "type":"structure", + "required":["urlConfiguration"], + "members":{ + "urlConfiguration":{ + "shape":"UrlConfiguration", + "documentation":"

The configuration of the URL/URLs.

" + } + }, + "documentation":"

The configuration of the URL/URLs for the web content that you want to crawl. You should be authorized to crawl the URLs.

" } }, "documentation":"

Describes the API operations for creating and managing Amazon Bedrock agents.

" diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index 5d545913e0..b40d0148e2 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-09-30", + "auth":["aws.auth#sigv4"], "endpointPrefix":"bedrock-runtime", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Bedrock Runtime", "serviceId":"Bedrock Runtime", "signatureVersion":"v4", @@ -12,6 +13,69 @@ "uid":"bedrock-runtime-2023-09-30" }, "operations":{ + "ApplyGuardrail":{ + "name":"ApplyGuardrail", + "http":{ + "method":"POST", + "requestUri":"/guardrail/{guardrailIdentifier}/version/{guardrailVersion}/apply", + "responseCode":200 + }, + "input":{"shape":"ApplyGuardrailRequest"}, + "output":{"shape":"ApplyGuardrailResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

The action to apply a guardrail.

" + }, + "Converse":{ + "name":"Converse", + "http":{ + "method":"POST", + "requestUri":"/model/{modelId}/converse", + "responseCode":200 + }, + "input":{"shape":"ConverseRequest"}, + "output":{"shape":"ConverseResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ModelTimeoutException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ModelNotReadyException"}, + {"shape":"ModelErrorException"} + ], + "documentation":"

Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model.

Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide

For example code, see Converse API examples in the Amazon Bedrock User Guide.

This operation requires permission for the bedrock:InvokeModel action.

" + }, + "ConverseStream":{ + "name":"ConverseStream", + "http":{ + "method":"POST", + "requestUri":"/model/{modelId}/converse-stream", + "responseCode":200 + }, + "input":{"shape":"ConverseStreamRequest"}, + "output":{"shape":"ConverseStreamResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ModelTimeoutException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ModelNotReadyException"}, + {"shape":"ModelErrorException"} + ], + "documentation":"

Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model.

To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response.

The CLI doesn't support streaming operations in Amazon Bedrock, including ConverseStream.

Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide

For example code, see Conversation streaming example in the Amazon Bedrock User Guide.

This operation requires permission for the bedrock:InvokeModelWithResponseStream action.

" + }, "InvokeModel":{ "name":"InvokeModel", "http":{ @@ -27,6 +91,7 @@ {"shape":"ThrottlingException"}, {"shape":"ModelTimeoutException"}, {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, {"shape":"ValidationException"}, {"shape":"ModelNotReadyException"}, {"shape":"ServiceQuotaExceededException"}, @@ -49,44 +114,1326 @@ {"shape":"ThrottlingException"}, {"shape":"ModelTimeoutException"}, {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, {"shape":"ModelStreamErrorException"}, {"shape":"ValidationException"}, {"shape":"ModelNotReadyException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"ModelErrorException"} ], - "documentation":"

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response.

The CLI doesn't support InvokeModelWithResponseStream.

For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

" + "documentation":"

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response.

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream.

For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

" } }, "shapes":{ "AccessDeniedException":{ "type":"structure", "members":{ - "message":{"shape":"NonBlankString"} + "message":{"shape":"NonBlankString"} + }, + "documentation":"

The request is denied because of missing access permissions.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AnyToolChoice":{ + "type":"structure", + "members":{ + }, + "documentation":"

The model must request at least one tool (no text is generated). For example, {\"any\" : {}}.

" + }, + "ApplyGuardrailRequest":{ + "type":"structure", + "required":[ + "guardrailIdentifier", + "guardrailVersion", + "source", + "content" + ], + "members":{ + "guardrailIdentifier":{ + "shape":"GuardrailIdentifier", + "documentation":"

The guardrail identifier used in the request to apply the guardrail.

", + "location":"uri", + "locationName":"guardrailIdentifier" + }, + "guardrailVersion":{ + "shape":"GuardrailVersion", + "documentation":"

The guardrail version used in the request to apply the guardrail.

", + "location":"uri", + "locationName":"guardrailVersion" + }, + "source":{ + "shape":"GuardrailContentSource", + "documentation":"

The source of data used in the request to apply the guardrail.

" + }, + "content":{ + "shape":"GuardrailContentBlockList", + "documentation":"

The content details used in the request to apply the guardrail.

" + } + } + }, + "ApplyGuardrailResponse":{ + "type":"structure", + "required":[ + "usage", + "action", + "outputs", + "assessments" + ], + "members":{ + "usage":{ + "shape":"GuardrailUsage", + "documentation":"

The usage details in the response from the guardrail.

" + }, + "action":{ + "shape":"GuardrailAction", + "documentation":"

The action taken in the response from the guardrail.

" + }, + "outputs":{ + "shape":"GuardrailOutputContentList", + "documentation":"

The output details in the response from the guardrail.

" + }, + "assessments":{ + "shape":"GuardrailAssessmentList", + "documentation":"

The assessment details in the response from the guardrail.

" + } + } + }, + "AutoToolChoice":{ + "type":"structure", + "members":{ + }, + "documentation":"

The Model automatically decides if a tool should be called or whether to generate text instead. For example, {\"auto\" : {}}.

" + }, + "Body":{ + "type":"blob", + "max":25000000, + "min":0, + "sensitive":true + }, + "ContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

Text to include in the message.

" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"

Image to include in the message.

This field is only supported by Anthropic Claude 3 models.

" + }, + "document":{ + "shape":"DocumentBlock", + "documentation":"

A document to include in the message.

" + }, + "toolUse":{ + "shape":"ToolUseBlock", + "documentation":"

Information about a tool use request from a model.

" + }, + "toolResult":{ + "shape":"ToolResultBlock", + "documentation":"

The result for a tool request that a model makes.

" + }, + "guardContent":{ + "shape":"GuardrailConverseContentBlock", + "documentation":"

Contains the content to assess with the guardrail. If you don't specify guardContent in a call to the Converse API, the guardrail (if passed in the Converse API) assesses the entire message.

For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.
" + } + }, + "documentation":"

A block of content for a message that you pass to, or receive from, a model with the Converse or ConverseStream API operations.

", + "union":true + }, + "ContentBlockDelta":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

The content text.

" + }, + "toolUse":{ + "shape":"ToolUseBlockDelta", + "documentation":"

Information about a tool that the model is requesting to use.

" + } + }, + "documentation":"

A block of content in a streaming response.

", + "union":true + }, + "ContentBlockDeltaEvent":{ + "type":"structure", + "required":[ + "delta", + "contentBlockIndex" + ], + "members":{ + "delta":{ + "shape":"ContentBlockDelta", + "documentation":"

The delta for a content block delta event.

" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The block index for a content block delta event.

" + } + }, + "documentation":"

The content block delta event.

", + "event":true + }, + "ContentBlockStart":{ + "type":"structure", + "members":{ + "toolUse":{ + "shape":"ToolUseBlockStart", + "documentation":"

Information about a tool that the model is requesting to use.

" + } + }, + "documentation":"

Content block start information.

", + "union":true + }, + "ContentBlockStartEvent":{ + "type":"structure", + "required":[ + "start", + "contentBlockIndex" + ], + "members":{ + "start":{ + "shape":"ContentBlockStart", + "documentation":"

Start information about a content block start event.

" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The index for a content block start event.

" + } + }, + "documentation":"

Content block start event.

", + "event":true + }, + "ContentBlockStopEvent":{ + "type":"structure", + "required":["contentBlockIndex"], + "members":{ + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The index for a content block.

" + } + }, + "documentation":"

A content block stop event.

", + "event":true + }, + "ContentBlocks":{ + "type":"list", + "member":{"shape":"ContentBlock"} + }, + "ConversationRole":{ + "type":"string", + "enum":[ + "user", + "assistant" + ] + }, + "ConversationalModelId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)" + }, + "ConverseMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"

The latency of the call to Converse, in milliseconds.

" + } + }, + "documentation":"

Metrics for a call to Converse.

" + }, + "ConverseOutput":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

The message that the model generates.

" + } + }, + "documentation":"

The output from a call to Converse.

", + "union":true + }, + "ConverseRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"

The identifier for the model that you want to call.

The modelId to provide depends on the type of model that you use:

", + "location":"uri", + "locationName":"modelId" + }, + "messages":{ + "shape":"Messages", + "documentation":"

The messages that you want to send to the model.

" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"

A system prompt to pass to the model.

" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"

Inference parameters to pass to the model. Converse supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.

" + }, + "toolConfig":{ + "shape":"ToolConfiguration", + "documentation":"

Configuration information for the tools that the model can use when generating a response.

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

" + }, + "guardrailConfig":{ + "shape":"GuardrailConfiguration", + "documentation":"

Configuration information for a guardrail that you want to use in the request.

" + }, + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports in the inferenceConfig field. For more information, see Model parameters.

" + }, + "additionalModelResponseFieldPaths":{ + "shape":"ConverseRequestAdditionalModelResponseFieldPathsList", + "documentation":"

Additional model parameters field paths to return in the response. Converse returns the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths.

[ \"/stop_sequence\" ]

For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.

Converse rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse.

" + } + } + }, + "ConverseRequestAdditionalModelResponseFieldPathsList":{ + "type":"list", + "member":{"shape":"ConverseRequestAdditionalModelResponseFieldPathsListMemberString"}, + "max":10, + "min":0 + }, + "ConverseRequestAdditionalModelResponseFieldPathsListMemberString":{ + "type":"string", + "max":256, + "min":1 + }, + "ConverseResponse":{ + "type":"structure", + "required":[ + "output", + "stopReason", + "usage", + "metrics" + ], + "members":{ + "output":{ + "shape":"ConverseOutput", + "documentation":"

The result from the call to Converse.

" + }, + "stopReason":{ + "shape":"StopReason", + "documentation":"

The reason why the model stopped generating output.

" + }, + "usage":{ + "shape":"TokenUsage", + "documentation":"

The total number of tokens used in the call to Converse. The total includes the tokens input to the model and the tokens generated by the model.

" + }, + "metrics":{ + "shape":"ConverseMetrics", + "documentation":"

Metrics for the call to Converse.

" + }, + "additionalModelResponseFields":{ + "shape":"Document", + "documentation":"

Additional fields in the response that are unique to the model.

" + }, + "trace":{ + "shape":"ConverseTrace", + "documentation":"

A trace object that contains information about the Guardrail behavior.

" + } + } + }, + "ConverseStreamMetadataEvent":{ + "type":"structure", + "required":[ + "usage", + "metrics" + ], + "members":{ + "usage":{ + "shape":"TokenUsage", + "documentation":"

Usage information for the conversation stream event.

" + }, + "metrics":{ + "shape":"ConverseStreamMetrics", + "documentation":"

The metrics for the conversation stream metadata event.

" + }, + "trace":{ + "shape":"ConverseStreamTrace", + "documentation":"

The trace object in the response from ConverseStream that contains information about the guardrail behavior.

" + } + }, + "documentation":"

A conversation stream metadata event.

", + "event":true + }, + "ConverseStreamMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"

The latency for the streaming request, in milliseconds.

" + } + }, + "documentation":"

Metrics for the stream.

" + }, + "ConverseStreamOutput":{ + "type":"structure", + "members":{ + "messageStart":{ + "shape":"MessageStartEvent", + "documentation":"

Message start information.

" + }, + "contentBlockStart":{ + "shape":"ContentBlockStartEvent", + "documentation":"

Start information for a content block.

" + }, + "contentBlockDelta":{ + "shape":"ContentBlockDeltaEvent", + "documentation":"

The messages output content block delta.

" + }, + "contentBlockStop":{ + "shape":"ContentBlockStopEvent", + "documentation":"

Stop information for a content block.

" + }, + "messageStop":{ + "shape":"MessageStopEvent", + "documentation":"

Message stop information.

" + }, + "metadata":{ + "shape":"ConverseStreamMetadataEvent", + "documentation":"

Metadata for the converse output stream.

" + }, + "internalServerException":{ + "shape":"InternalServerException", + "documentation":"

An internal server error occurred. Retry your request.

" + }, + "modelStreamErrorException":{ + "shape":"ModelStreamErrorException", + "documentation":"

A streaming error occurred. Retry your request.

" + }, + "validationException":{ + "shape":"ValidationException", + "documentation":"

Input validation failed. Check your request parameters and retry the request.

" + }, + "throttlingException":{ + "shape":"ThrottlingException", + "documentation":"

The number of requests exceeds the limit. Resubmit your request later.

" + }, + "serviceUnavailableException":{ + "shape":"ServiceUnavailableException", + "documentation":"

The service isn't currently available. Try again later.

" + } + }, + "documentation":"

The messages output stream.

", + "eventstream":true + }, + "ConverseStreamRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"

The ID for the model.

The modelId to provide depends on the type of model that you use:

", + "location":"uri", + "locationName":"modelId" + }, + "messages":{ + "shape":"Messages", + "documentation":"

The messages that you want to send to the model.

" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"

A system prompt to send to the model.

" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"

Inference parameters to pass to the model. ConverseStream supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.

" + }, + "toolConfig":{ + "shape":"ToolConfiguration", + "documentation":"

Configuration information for the tools that the model can use when generating a response.

This field is only supported by Anthropic Claude 3 models.

" + }, + "guardrailConfig":{ + "shape":"GuardrailStreamConfiguration", + "documentation":"

Configuration information for a guardrail that you want to use in the request.

" + }, + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Additional inference parameters that the model supports, beyond the base set of inference parameters that ConverseStream supports in the inferenceConfig field.

" + }, + "additionalModelResponseFieldPaths":{ + "shape":"ConverseStreamRequestAdditionalModelResponseFieldPathsList", + "documentation":"

Additional model parameters field paths to return in the response. ConverseStream returns the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths.

[ \"/stop_sequence\" ]

For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.

ConverseStream rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by ConverseStream.

" + } + } + }, + "ConverseStreamRequestAdditionalModelResponseFieldPathsList":{ + "type":"list", + "member":{"shape":"ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString"}, + "max":10, + "min":0 + }, + "ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString":{ + "type":"string", + "max":256, + "min":1 + }, + "ConverseStreamResponse":{ + "type":"structure", + "members":{ + "stream":{ + "shape":"ConverseStreamOutput", + "documentation":"

The output stream that the model generated.

" + } + }, + "payload":"stream" + }, + "ConverseStreamTrace":{ + "type":"structure", + "members":{ + "guardrail":{ + "shape":"GuardrailTraceAssessment", + "documentation":"

The guardrail trace object.

" + } + }, + "documentation":"

The trace object in a response from ConverseStream. Currently, you can only trace guardrails.

" + }, + "ConverseTrace":{ + "type":"structure", + "members":{ + "guardrail":{ + "shape":"GuardrailTraceAssessment", + "documentation":"

The guardrail trace object.

" + } + }, + "documentation":"

The trace object in a response from Converse. Currently, you can only trace guardrails.

" + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "DocumentBlock":{ + "type":"structure", + "required":[ + "format", + "name", + "source" + ], + "members":{ + "format":{ + "shape":"DocumentFormat", + "documentation":"

The format of a document, or its extension.

" + }, + "name":{ + "shape":"DocumentBlockNameString", + "documentation":"

A name for the document. The name can only contain the following characters:

  • Alphanumeric characters

  • Whitespace characters (no more than one in a row)

  • Hyphens

  • Parentheses

  • Square brackets

This field is vulnerable to prompt injections, because the model might inadvertently interpret it as instructions. Therefore, we recommend that you specify a neutral name.

" + }, + "source":{ + "shape":"DocumentSource", + "documentation":"

Contains the content of the document.

" + } + }, + "documentation":"

A document to include in a message.

" + }, + "DocumentBlockNameString":{ + "type":"string", + "max":200, + "min":1 + }, + "DocumentFormat":{ + "type":"string", + "enum":[ + "pdf", + "csv", + "doc", + "docx", + "xls", + "xlsx", + "html", + "txt", + "md" + ] + }, + "DocumentSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"DocumentSourceBytesBlob", + "documentation":"

The raw bytes for the document. If you use an Amazon Web Services SDK, you don't need to encode the bytes in base64.

" + } + }, + "documentation":"

Contains the content of a document.

", + "union":true + }, + "DocumentSourceBytesBlob":{ + "type":"blob", + "min":1 + }, + "GuardrailAction":{ + "type":"string", + "enum":[ + "NONE", + "GUARDRAIL_INTERVENED" + ] + }, + "GuardrailAssessment":{ + "type":"structure", + "members":{ + "topicPolicy":{ + "shape":"GuardrailTopicPolicyAssessment", + "documentation":"

The topic policy.

" + }, + "contentPolicy":{ + "shape":"GuardrailContentPolicyAssessment", + "documentation":"

The content policy.

" + }, + "wordPolicy":{ + "shape":"GuardrailWordPolicyAssessment", + "documentation":"

The word policy.

" + }, + "sensitiveInformationPolicy":{ + "shape":"GuardrailSensitiveInformationPolicyAssessment", + "documentation":"

The sensitive information policy.

" + }, + "contextualGroundingPolicy":{ + "shape":"GuardrailContextualGroundingPolicyAssessment", + "documentation":"

The contextual grounding policy used for the guardrail assessment.

" + } + }, + "documentation":"

A behavior assessment of the guardrail policies used in a call to the Converse API.

" + }, + "GuardrailAssessmentList":{ + "type":"list", + "member":{"shape":"GuardrailAssessment"} + }, + "GuardrailAssessmentListMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"GuardrailAssessmentList"} + }, + "GuardrailAssessmentMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"GuardrailAssessment"} + }, + "GuardrailConfiguration":{ + "type":"structure", + "required":[ + "guardrailIdentifier", + "guardrailVersion" + ], + "members":{ + "guardrailIdentifier":{ + "shape":"GuardrailIdentifier", + "documentation":"

The identifier for the guardrail.

" + }, + "guardrailVersion":{ + "shape":"GuardrailVersion", + "documentation":"

The version of the guardrail.

" + }, + "trace":{ + "shape":"GuardrailTrace", + "documentation":"

The trace behavior for the guardrail.

" + } + }, + "documentation":"

Configuration information for a guardrail that you use with the Converse operation.

" + }, + "GuardrailContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"GuardrailTextBlock", + "documentation":"

Text within content block to be evaluated by the guardrail.

" + } + }, + "documentation":"

The content block to be evaluated by the guardrail.

", + "union":true + }, + "GuardrailContentBlockList":{ + "type":"list", + "member":{"shape":"GuardrailContentBlock"} + }, + "GuardrailContentFilter":{ + "type":"structure", + "required":[ + "type", + "confidence", + "action" + ], + "members":{ + "type":{ + "shape":"GuardrailContentFilterType", + "documentation":"

The guardrail type.

" + }, + "confidence":{ + "shape":"GuardrailContentFilterConfidence", + "documentation":"

The guardrail confidence.

" + }, + "action":{ + "shape":"GuardrailContentPolicyAction", + "documentation":"

The guardrail action.

" + } + }, + "documentation":"

The content filter for a guardrail.

" + }, + "GuardrailContentFilterConfidence":{ + "type":"string", + "enum":[ + "NONE", + "LOW", + "MEDIUM", + "HIGH" + ] + }, + "GuardrailContentFilterList":{ + "type":"list", + "member":{"shape":"GuardrailContentFilter"} + }, + "GuardrailContentFilterType":{ + "type":"string", + "enum":[ + "INSULTS", + "HATE", + "SEXUAL", + "VIOLENCE", + "MISCONDUCT", + "PROMPT_ATTACK" + ] + }, + "GuardrailContentPolicyAction":{ + "type":"string", + "enum":["BLOCKED"] + }, + "GuardrailContentPolicyAssessment":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"GuardrailContentFilterList", + "documentation":"

The content policy filters.

" + } + }, + "documentation":"

An assessment of a content policy for a guardrail.

" + }, + "GuardrailContentPolicyUnitsProcessed":{ + "type":"integer", + "box":true + }, + "GuardrailContentQualifier":{ + "type":"string", + "enum":[ + "grounding_source", + "query", + "guard_content" + ] + }, + "GuardrailContentQualifierList":{ + "type":"list", + "member":{"shape":"GuardrailContentQualifier"} + }, + "GuardrailContentSource":{ + "type":"string", + "enum":[ + "INPUT", + "OUTPUT" + ] + }, + "GuardrailContextualGroundingFilter":{ + "type":"structure", + "required":[ + "type", + "threshold", + "score", + "action" + ], + "members":{ + "type":{ + "shape":"GuardrailContextualGroundingFilterType", + "documentation":"

The contextual grounding filter type.

" + }, + "threshold":{ + "shape":"GuardrailContextualGroundingFilterThresholdDouble", + "documentation":"

The threshold used by contextual grounding filter to determine whether the content is grounded or not.

" + }, + "score":{ + "shape":"GuardrailContextualGroundingFilterScoreDouble", + "documentation":"

The score generated by contextual grounding filter.

" + }, + "action":{ + "shape":"GuardrailContextualGroundingPolicyAction", + "documentation":"

The action performed by the guardrails contextual grounding filter.

" + } + }, + "documentation":"

The details for the guardrails contextual grounding filter.

" + }, + "GuardrailContextualGroundingFilterScoreDouble":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, + "GuardrailContextualGroundingFilterThresholdDouble":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, + "GuardrailContextualGroundingFilterType":{ + "type":"string", + "enum":[ + "GROUNDING", + "RELEVANCE" + ] + }, + "GuardrailContextualGroundingFilters":{ + "type":"list", + "member":{"shape":"GuardrailContextualGroundingFilter"} + }, + "GuardrailContextualGroundingPolicyAction":{ + "type":"string", + "enum":[ + "BLOCKED", + "NONE" + ] + }, + "GuardrailContextualGroundingPolicyAssessment":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"GuardrailContextualGroundingFilters", + "documentation":"

The filter details for the guardrails contextual grounding filter.

" + } + }, + "documentation":"

The policy assessment details for the guardrails contextual grounding filter.

" + }, + "GuardrailContextualGroundingPolicyUnitsProcessed":{ + "type":"integer", + "box":true + }, + "GuardrailConverseContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"GuardrailConverseTextBlock", + "documentation":"

The text to guard.

" + } + }, + "documentation":"

A content block for selective guarding with the Converse or ConverseStream API operations.

", + "union":true + }, + "GuardrailConverseContentQualifier":{ + "type":"string", + "enum":[ + "grounding_source", + "query", + "guard_content" + ] + }, + "GuardrailConverseContentQualifierList":{ + "type":"list", + "member":{"shape":"GuardrailConverseContentQualifier"} + }, + "GuardrailConverseTextBlock":{ + "type":"structure", + "required":["text"], + "members":{ + "text":{ + "shape":"String", + "documentation":"

The text that you want to guard.

" + }, + "qualifiers":{ + "shape":"GuardrailConverseContentQualifierList", + "documentation":"

The qualifier details for the guardrails contextual grounding filter.

" + } + }, + "documentation":"

A text block that contains text that you want to assess with a guardrail. For more information, see GuardrailConverseContentBlock.

" + }, + "GuardrailCustomWord":{ + "type":"structure", + "required":[ + "match", + "action" + ], + "members":{ + "match":{ + "shape":"String", + "documentation":"

The match for the custom word.

" + }, + "action":{ + "shape":"GuardrailWordPolicyAction", + "documentation":"

The action for the custom word.

" + } + }, + "documentation":"

A custom word configured in a guardrail.

" + }, + "GuardrailCustomWordList":{ + "type":"list", + "member":{"shape":"GuardrailCustomWord"} + }, + "GuardrailIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))" + }, + "GuardrailManagedWord":{ + "type":"structure", + "required":[ + "match", + "type", + "action" + ], + "members":{ + "match":{ + "shape":"String", + "documentation":"

The match for the managed word.

" + }, + "type":{ + "shape":"GuardrailManagedWordType", + "documentation":"

The type for the managed word.

" + }, + "action":{ + "shape":"GuardrailWordPolicyAction", + "documentation":"

The action for the managed word.

" + } + }, + "documentation":"

A managed word configured in a guardrail.

" + }, + "GuardrailManagedWordList":{ + "type":"list", + "member":{"shape":"GuardrailManagedWord"} + }, + "GuardrailManagedWordType":{ + "type":"string", + "enum":["PROFANITY"] + }, + "GuardrailOutputContent":{ + "type":"structure", + "members":{ + "text":{ + "shape":"GuardrailOutputText", + "documentation":"

The specific text for the output content produced by the guardrail.

" + } + }, + "documentation":"

The output content produced by the guardrail.

" + }, + "GuardrailOutputContentList":{ + "type":"list", + "member":{"shape":"GuardrailOutputContent"} + }, + "GuardrailOutputText":{"type":"string"}, + "GuardrailPiiEntityFilter":{ + "type":"structure", + "required":[ + "match", + "type", + "action" + ], + "members":{ + "match":{ + "shape":"String", + "documentation":"

The PII entity filter match.

" + }, + "type":{ + "shape":"GuardrailPiiEntityType", + "documentation":"

The PII entity filter type.

" + }, + "action":{ + "shape":"GuardrailSensitiveInformationPolicyAction", + "documentation":"

The PII entity filter action.

" + } + }, + "documentation":"

A Personally Identifiable Information (PII) entity configured in a guardrail.

" + }, + "GuardrailPiiEntityFilterList":{ + "type":"list", + "member":{"shape":"GuardrailPiiEntityFilter"} + }, + "GuardrailPiiEntityType":{ + "type":"string", + "enum":[ + "ADDRESS", + "AGE", + "AWS_ACCESS_KEY", + "AWS_SECRET_KEY", + "CA_HEALTH_NUMBER", + "CA_SOCIAL_INSURANCE_NUMBER", + "CREDIT_DEBIT_CARD_CVV", + "CREDIT_DEBIT_CARD_EXPIRY", + "CREDIT_DEBIT_CARD_NUMBER", + "DRIVER_ID", + "EMAIL", + "INTERNATIONAL_BANK_ACCOUNT_NUMBER", + "IP_ADDRESS", + "LICENSE_PLATE", + "MAC_ADDRESS", + "NAME", + "PASSWORD", + "PHONE", + "PIN", + "SWIFT_CODE", + "UK_NATIONAL_HEALTH_SERVICE_NUMBER", + "UK_NATIONAL_INSURANCE_NUMBER", + "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER", + "URL", + "USERNAME", + "US_BANK_ACCOUNT_NUMBER", + "US_BANK_ROUTING_NUMBER", + "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER", + "US_PASSPORT_NUMBER", + "US_SOCIAL_SECURITY_NUMBER", + "VEHICLE_IDENTIFICATION_NUMBER" + ] + }, + "GuardrailRegexFilter":{ + "type":"structure", + "required":["action"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The regex filter name.

" + }, + "match":{ + "shape":"String", + "documentation":"

The regex filter match.

" + }, + "regex":{ + "shape":"String", + "documentation":"

The regex query.

" + }, + "action":{ + "shape":"GuardrailSensitiveInformationPolicyAction", + "documentation":"

The regex filter action.

" + } + }, + "documentation":"

A Regex filter configured in a guardrail.

" + }, + "GuardrailRegexFilterList":{ + "type":"list", + "member":{"shape":"GuardrailRegexFilter"} + }, + "GuardrailSensitiveInformationPolicyAction":{ + "type":"string", + "enum":[ + "ANONYMIZED", + "BLOCKED" + ] + }, + "GuardrailSensitiveInformationPolicyAssessment":{ + "type":"structure", + "required":[ + "piiEntities", + "regexes" + ], + "members":{ + "piiEntities":{ + "shape":"GuardrailPiiEntityFilterList", + "documentation":"

The PII entities in the assessment.

" + }, + "regexes":{ + "shape":"GuardrailRegexFilterList", + "documentation":"

The regex queries in the assessment.

" + } + }, + "documentation":"

The assessment for a Personally Identifiable Information (PII) policy.

" + }, + "GuardrailSensitiveInformationPolicyFreeUnitsProcessed":{ + "type":"integer", + "box":true + }, + "GuardrailSensitiveInformationPolicyUnitsProcessed":{ + "type":"integer", + "box":true + }, + "GuardrailStreamConfiguration":{ + "type":"structure", + "required":[ + "guardrailIdentifier", + "guardrailVersion" + ], + "members":{ + "guardrailIdentifier":{ + "shape":"GuardrailIdentifier", + "documentation":"

The identifier for the guardrail.

" + }, + "guardrailVersion":{ + "shape":"GuardrailVersion", + "documentation":"

The version of the guardrail.

" + }, + "trace":{ + "shape":"GuardrailTrace", + "documentation":"

The trace behavior for the guardrail.

" + }, + "streamProcessingMode":{ + "shape":"GuardrailStreamProcessingMode", + "documentation":"

The processing mode.

For more information, see Configure streaming response behavior in the Amazon Bedrock User Guide.

" + } + }, + "documentation":"

Configuration information for a guardrail that you use with the ConverseStream action.

" + }, + "GuardrailStreamProcessingMode":{ + "type":"string", + "enum":[ + "sync", + "async" + ] + }, + "GuardrailTextBlock":{ + "type":"structure", + "required":["text"], + "members":{ + "text":{ + "shape":"String", + "documentation":"

The input text details to be evaluated by the guardrail.

" + }, + "qualifiers":{ + "shape":"GuardrailContentQualifierList", + "documentation":"

The qualifiers describing the text block.

" + } + }, + "documentation":"

The text block to be evaluated by the guardrail.

" + }, + "GuardrailTopic":{ + "type":"structure", + "required":[ + "name", + "type", + "action" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name for the guardrail.

" + }, + "type":{ + "shape":"GuardrailTopicType", + "documentation":"

The type behavior that the guardrail should perform when the model detects the topic.

" + }, + "action":{ + "shape":"GuardrailTopicPolicyAction", + "documentation":"

The action the guardrail should take when it intervenes on a topic.

" + } }, - "documentation":"

The request is denied because of missing access permissions.

", - "error":{ - "httpStatusCode":403, - "senderFault":true + "documentation":"

Information about a topic guardrail.

" + }, + "GuardrailTopicList":{ + "type":"list", + "member":{"shape":"GuardrailTopic"} + }, + "GuardrailTopicPolicyAction":{ + "type":"string", + "enum":["BLOCKED"] + }, + "GuardrailTopicPolicyAssessment":{ + "type":"structure", + "required":["topics"], + "members":{ + "topics":{ + "shape":"GuardrailTopicList", + "documentation":"

The topics in the assessment.

" + } }, - "exception":true + "documentation":"

A behavior assessment of a topic policy.

" }, - "Body":{ - "type":"blob", - "max":25000000, - "min":0, - "sensitive":true + "GuardrailTopicPolicyUnitsProcessed":{ + "type":"integer", + "box":true }, - "GuardrailIdentifier":{ + "GuardrailTopicType":{ "type":"string", - "max":2048, - "min":0, - "pattern":"(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))" + "enum":["DENY"] + }, + "GuardrailTrace":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "GuardrailTraceAssessment":{ + "type":"structure", + "members":{ + "modelOutput":{ + "shape":"ModelOutputs", + "documentation":"

The output from the model.

" + }, + "inputAssessment":{ + "shape":"GuardrailAssessmentMap", + "documentation":"

The input assessment.

" + }, + "outputAssessments":{ + "shape":"GuardrailAssessmentListMap", + "documentation":"

The output assessments.

" + } + }, + "documentation":"

A top-level guardrail trace object. For more information, see ConverseTrace.

" + }, + "GuardrailUsage":{ + "type":"structure", + "required":[ + "topicPolicyUnits", + "contentPolicyUnits", + "wordPolicyUnits", + "sensitiveInformationPolicyUnits", + "sensitiveInformationPolicyFreeUnits", + "contextualGroundingPolicyUnits" + ], + "members":{ + "topicPolicyUnits":{ + "shape":"GuardrailTopicPolicyUnitsProcessed", + "documentation":"

The topic policy units processed by the guardrail.

" + }, + "contentPolicyUnits":{ + "shape":"GuardrailContentPolicyUnitsProcessed", + "documentation":"

The content policy units processed by the guardrail.

" + }, + "wordPolicyUnits":{ + "shape":"GuardrailWordPolicyUnitsProcessed", + "documentation":"

The word policy units processed by the guardrail.

" + }, + "sensitiveInformationPolicyUnits":{ + "shape":"GuardrailSensitiveInformationPolicyUnitsProcessed", + "documentation":"

The sensitive information policy units processed by the guardrail.

" + }, + "sensitiveInformationPolicyFreeUnits":{ + "shape":"GuardrailSensitiveInformationPolicyFreeUnitsProcessed", + "documentation":"

The sensitive information policy free units processed by the guardrail.

" + }, + "contextualGroundingPolicyUnits":{ + "shape":"GuardrailContextualGroundingPolicyUnitsProcessed", + "documentation":"

The contextual grounding policy units processed by the guardrail.

" + } + }, + "documentation":"

The details on the use of the guardrail.

" }, "GuardrailVersion":{ "type":"string", "pattern":"(([1-9][0-9]{0,7})|(DRAFT))" }, + "GuardrailWordPolicyAction":{ + "type":"string", + "enum":["BLOCKED"] + }, + "GuardrailWordPolicyAssessment":{ + "type":"structure", + "required":[ + "customWords", + "managedWordLists" + ], + "members":{ + "customWords":{ + "shape":"GuardrailCustomWordList", + "documentation":"

Custom words in the assessment.

" + }, + "managedWordLists":{ + "shape":"GuardrailManagedWordList", + "documentation":"

Managed word lists in the assessment.

" + } + }, + "documentation":"

The word policy assessment.

" + }, + "GuardrailWordPolicyUnitsProcessed":{ + "type":"integer", + "box":true + }, + "ImageBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"ImageFormat", + "documentation":"

The format of the image.

" + }, + "source":{ + "shape":"ImageSource", + "documentation":"

The source for the image.

" + } + }, + "documentation":"

Image content for a message.

" + }, + "ImageFormat":{ + "type":"string", + "enum":[ + "png", + "jpeg", + "gif", + "webp" + ] + }, + "ImageSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"ImageSourceBytesBlob", + "documentation":"

The raw image bytes for the image. If you use an AWS SDK, you don't need to encode the image bytes in base64.

" + } + }, + "documentation":"

The source for an image.

", + "union":true + }, + "ImageSourceBytesBlob":{ + "type":"blob", + "min":1 + }, + "InferenceConfiguration":{ + "type":"structure", + "members":{ + "maxTokens":{ + "shape":"InferenceConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "temperature":{ + "shape":"InferenceConfigurationTemperatureFloat", + "documentation":"

The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.

The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "topP":{ + "shape":"InferenceConfigurationTopPFloat", + "documentation":"

The percentage of most-likely candidates that the model considers for the next token. For example, if you choose a value of 0.8 for topP, the model selects from the top 80% of the probability distribution of tokens that could be next in the sequence.

The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "stopSequences":{ + "shape":"InferenceConfigurationStopSequencesList", + "documentation":"

A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.

" + } + }, + "documentation":"

Base inference parameters to pass to a model in a call to Converse or ConverseStream. For more information, see Inference parameters for foundation models.

If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field in the call to Converse or ConverseStream. For more information, see Model parameters.

" + }, + "InferenceConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "InferenceConfigurationStopSequencesList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":4, + "min":0 + }, + "InferenceConfigurationTemperatureFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, + "InferenceConfigurationTopPFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, "InternalServerException":{ "type":"structure", "members":{ @@ -112,11 +1459,11 @@ "members":{ "body":{ "shape":"Body", - "documentation":"

The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

" + "documentation":"

The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

" }, "contentType":{ "shape":"MimeType", - "documentation":"

The MIME type of the input data in the request. The default value is application/json.

", + "documentation":"

The MIME type of the input data in the request. You must specify application/json.

", "location":"header", "locationName":"Content-Type" }, @@ -182,11 +1529,11 @@ "members":{ "body":{ "shape":"Body", - "documentation":"

The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

" + "documentation":"

The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

" }, "contentType":{ "shape":"MimeType", - "documentation":"

The MIME type of the input data in the request. The default value is application/json.

", + "documentation":"

The MIME type of the input data in the request. You must specify application/json.

", "location":"header", "locationName":"Content-Type" }, @@ -243,6 +1590,60 @@ }, "payload":"body" }, + "Long":{ + "type":"long", + "box":true + }, + "Message":{ + "type":"structure", + "required":[ + "role", + "content" + ], + "members":{ + "role":{ + "shape":"ConversationRole", + "documentation":"

The role that the message plays in the conversation.

" + }, + "content":{ + "shape":"ContentBlocks", + "documentation":"

The message content. Note the following restrictions:

  • You can include up to 20 images. Each image's size, height, and width must be no more than 3.75 MB, 8000 px, and 8000 px, respectively.

  • You can include up to five documents. Each document's size must be no more than 4.5 MB.

  • If you include a ContentBlock with a document field in the array, you must also include a ContentBlock with a text field.

  • You can only include images and documents if the role is user.

" + } + }, + "documentation":"

A message input, or returned from, a call to Converse or ConverseStream.

" + }, + "MessageStartEvent":{ + "type":"structure", + "required":["role"], + "members":{ + "role":{ + "shape":"ConversationRole", + "documentation":"

The role for the message.

" + } + }, + "documentation":"

The start of a message.

", + "event":true + }, + "MessageStopEvent":{ + "type":"structure", + "required":["stopReason"], + "members":{ + "stopReason":{ + "shape":"StopReason", + "documentation":"

The reason why the model stopped generating output.

" + }, + "additionalModelResponseFields":{ + "shape":"Document", + "documentation":"

The additional model response fields.

" + } + }, + "documentation":"

The stop event for a message.

", + "event":true + }, + "Messages":{ + "type":"list", + "member":{"shape":"Message"} + }, "MimeType":{"type":"string"}, "ModelErrorException":{ "type":"structure", @@ -276,6 +1677,10 @@ }, "exception":true }, + "ModelOutputs":{ + "type":"list", + "member":{"shape":"GuardrailOutputText"} + }, "ModelStreamErrorException":{ "type":"structure", "members":{ @@ -312,6 +1717,15 @@ "type":"string", "pattern":"[\\s\\S]*" }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NonNegativeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "PartBody":{ "type":"blob", "max":1000000, @@ -363,12 +1777,13 @@ }, "throttlingException":{ "shape":"ThrottlingException", - "documentation":"

The number or frequency of requests exceeds the limit. Resubmit your request later.

" + "documentation":"

Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process.

" }, "modelTimeoutException":{ "shape":"ModelTimeoutException", "documentation":"

The request took too long to process. Processing time exceeded the model timeout length.

" - } + }, + "serviceUnavailableException":{"shape":"ServiceUnavailableException"} }, "documentation":"

Definition of content in the response stream.

", "eventstream":true @@ -378,31 +1793,324 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"

The number of requests exceeds the service quota. Resubmit your request later.

", + "documentation":"

Your request exceeds the service quota for your account. You can view your quotas at Viewing service quotas. You can resubmit your request later.

", "error":{ "httpStatusCode":400, "senderFault":true }, "exception":true }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

The service isn't currently available. Try again later.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "SpecificToolChoice":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model must request.

" + } + }, + "documentation":"

The model must request a specific tool. For example, {\"tool\" : {\"name\" : \"Your tool name\"}}.

This field is only supported by Anthropic Claude 3 models.

" + }, "StatusCode":{ "type":"integer", "box":true, "max":599, "min":100 }, + "StopReason":{ + "type":"string", + "enum":[ + "end_turn", + "tool_use", + "max_tokens", + "stop_sequence", + "guardrail_intervened", + "content_filtered" + ] + }, + "String":{"type":"string"}, + "SystemContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"NonEmptyString", + "documentation":"

A system prompt for the model.

" + }, + "guardContent":{ + "shape":"GuardrailConverseContentBlock", + "documentation":"

A content block to assess with the guardrail. Use with the Converse or ConverseStream API operations.

For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.

" + } + }, + "documentation":"

A system content block.

", + "union":true + }, + "SystemContentBlocks":{ + "type":"list", + "member":{"shape":"SystemContentBlock"} + }, "ThrottlingException":{ "type":"structure", "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"

The number of requests exceeds the limit. Resubmit your request later.

", + "documentation":"

Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process.

", "error":{ "httpStatusCode":429, "senderFault":true }, "exception":true }, + "TokenUsage":{ + "type":"structure", + "required":[ + "inputTokens", + "outputTokens", + "totalTokens" + ], + "members":{ + "inputTokens":{ + "shape":"TokenUsageInputTokensInteger", + "documentation":"

The number of tokens sent in the request to the model.

" + }, + "outputTokens":{ + "shape":"TokenUsageOutputTokensInteger", + "documentation":"

The number of tokens that the model generated for the request.

" + }, + "totalTokens":{ + "shape":"TokenUsageTotalTokensInteger", + "documentation":"

The total of input tokens and tokens generated by the model.

" + } + }, + "documentation":"

The tokens used in a message API inference call.

" + }, + "TokenUsageInputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageOutputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageTotalTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "Tool":{ + "type":"structure", + "members":{ + "toolSpec":{ + "shape":"ToolSpecification", + "documentation":"

The specification for the tool.

" + } + }, + "documentation":"

Information about a tool that you can use with the Converse API. For more information, see Tool use (function calling) in the Amazon Bedrock User Guide.

", + "union":true + }, + "ToolChoice":{ + "type":"structure", + "members":{ + "auto":{ + "shape":"AutoToolChoice", + "documentation":"

(Default). The Model automatically decides if a tool should be called or whether to generate text instead.

" + }, + "any":{ + "shape":"AnyToolChoice", + "documentation":"

The model must request at least one tool (no text is generated).

" + }, + "tool":{ + "shape":"SpecificToolChoice", + "documentation":"

The Model must request the specified tool. Only supported by Anthropic Claude 3 models.

" + } + }, + "documentation":"

Determines which tools the model should request in a call to Converse or ConverseStream. ToolChoice is only supported by Anthropic Claude 3 models and by Mistral AI Mistral Large.

", + "union":true + }, + "ToolConfiguration":{ + "type":"structure", + "required":["tools"], + "members":{ + "tools":{ + "shape":"ToolConfigurationToolsList", + "documentation":"

An array of tools that you want to pass to a model.

" + }, + "toolChoice":{ + "shape":"ToolChoice", + "documentation":"

If supported by model, forces the model to request a tool.

" + } + }, + "documentation":"

Configuration information for the tools that you pass to a model. For more information, see Tool use (function calling) in the Amazon Bedrock User Guide.

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

" + }, + "ToolConfigurationToolsList":{ + "type":"list", + "member":{"shape":"Tool"}, + "min":1 + }, + "ToolInputSchema":{ + "type":"structure", + "members":{ + "json":{ + "shape":"Document", + "documentation":"

The JSON schema for the tool. For more information, see JSON Schema Reference.

" + } + }, + "documentation":"

The schema for the tool. The top level schema type must be object.

", + "union":true + }, + "ToolName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9_]*" + }, + "ToolResultBlock":{ + "type":"structure", + "required":[ + "toolUseId", + "content" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID of the tool request that this is the result for.

" + }, + "content":{ + "shape":"ToolResultContentBlocks", + "documentation":"

The content for tool result content block.

" + }, + "status":{ + "shape":"ToolResultStatus", + "documentation":"

The status for the tool result content block.

This field is only supported by Anthropic Claude 3 models.

" + } + }, + "documentation":"

A tool result block that contains the results for a tool request that the model previously made.

" + }, + "ToolResultContentBlock":{ + "type":"structure", + "members":{ + "json":{ + "shape":"Document", + "documentation":"

A tool result that is JSON format data.

" + }, + "text":{ + "shape":"String", + "documentation":"

A tool result that is text.

" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"

A tool result that is an image.

This field is only supported by Anthropic Claude 3 models.

" + }, + "document":{ + "shape":"DocumentBlock", + "documentation":"

A tool result that is a document.

" + } + }, + "documentation":"

The tool result content block.

", + "union":true + }, + "ToolResultContentBlocks":{ + "type":"list", + "member":{"shape":"ToolResultContentBlock"} + }, + "ToolResultStatus":{ + "type":"string", + "enum":[ + "success", + "error" + ] + }, + "ToolSpecification":{ + "type":"structure", + "required":[ + "name", + "inputSchema" + ], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"

The name for the tool.

" + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

The description for the tool.

" + }, + "inputSchema":{ + "shape":"ToolInputSchema", + "documentation":"

The input schema for the tool in JSON format.

" + } + }, + "documentation":"

The specification for the tool.

" + }, + "ToolUseBlock":{ + "type":"structure", + "required":[ + "toolUseId", + "name", + "input" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID for the tool request.

" + }, + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model wants to use.

" + }, + "input":{ + "shape":"Document", + "documentation":"

The input to pass to the tool.

" + } + }, + "documentation":"

A tool use content block. Contains information about a tool that the model is requesting be run. The model uses the result from the tool to generate a response.

" + }, + "ToolUseBlockDelta":{ + "type":"structure", + "required":["input"], + "members":{ + "input":{ + "shape":"String", + "documentation":"

The input for a requested tool.

" + } + }, + "documentation":"

The delta for a tool use block.

" + }, + "ToolUseBlockStart":{ + "type":"structure", + "required":[ + "toolUseId", + "name" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID for the tool request.

" + }, + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model is requesting to use.

" + } + }, + "documentation":"

The start of a tool use block.

" + }, + "ToolUseId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "Trace":{ "type":"string", "enum":[ diff --git a/botocore/data/bedrock/2023-04-20/paginators-1.json b/botocore/data/bedrock/2023-04-20/paginators-1.json index f9017e8619..8d49059e7c 100644 --- a/botocore/data/bedrock/2023-04-20/paginators-1.json +++ b/botocore/data/bedrock/2023-04-20/paginators-1.json @@ -29,6 +29,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "guardrails" + }, + "ListModelCopyJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "modelCopyJobSummaries" } } } diff --git a/botocore/data/bedrock/2023-04-20/service-2.json b/botocore/data/bedrock/2023-04-20/service-2.json index 694f91927d..03c4c77137 100644 --- a/botocore/data/bedrock/2023-04-20/service-2.json +++ b/botocore/data/bedrock/2023-04-20/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-04-20", + "auth":["aws.auth#sigv4"], "endpointPrefix":"bedrock", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Bedrock", "serviceId":"Bedrock", "signatureVersion":"v4", @@ -30,7 +31,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluations.

", + "documentation":"

API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for creating a model evaluation job see, Model evaluation.

", "idempotent":true }, "CreateGuardrail":{ @@ -52,7 +53,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a guardrail to block topics and to filter out harmful content.

  • Specify a name and optional description.

  • Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

  • Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    • Give a name and description so that the guardrail can properly identify the topic.

    • Specify DENY in the type field.

    • (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

  • Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    • Specify the category in the type field.

    • Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

  • (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.

  • (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources.

" + "documentation":"

Creates a guardrail to block topics and to implement safeguards for your generative AI applications.

You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter out denied topics and words, and remove sensitive information for privacy protection.

  • Content filters - Adjust filter strengths to block input prompts or model responses containing harmful content.

  • Denied topics - Define a set of topics that are undesirable in the context of your application. These topics will be blocked if detected in user queries or model responses.

  • Word filters - Configure filters to block undesirable words, phrases, and profanity. Such words can include offensive terms, competitor names etc.

  • Sensitive information filters - Block or mask sensitive information such as personally identifiable information (PII) or custom regex in user inputs and model responses.

In addition to the above policies, you can also configure the messages to be returned to the user if a user input or model response is in violation of the policies defined in the guardrail.

For more information, see Guardrails for Amazon Bedrock in the Amazon Bedrock User Guide.

" }, "CreateGuardrailVersion":{ "name":"CreateGuardrailVersion", @@ -74,6 +75,24 @@ ], "documentation":"

Creates a version of the guardrail. Use this API to create a snapshot of the guardrail when you are satisfied with a configuration, or to compare the configuration with another version.

" }, + "CreateModelCopyJob":{ + "name":"CreateModelCopyJob", + "http":{ + "method":"POST", + "requestUri":"/model-copy-jobs", + "responseCode":201 + }, + "input":{"shape":"CreateModelCopyJobRequest"}, + "output":{"shape":"CreateModelCopyJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

Copies a model to another region so that it can be used there. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

", + "idempotent":true + }, "CreateModelCustomizationJob":{ "name":"CreateModelCustomizationJob", "http":{ @@ -93,7 +112,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a fine-tuning job to customize a base model.

You specify the base foundation model and the location of the training data. After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.

For information on the format of training and validation data, see Prepare the datasets.

Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size. To monitor a job, use the GetModelCustomizationJob operation to retrieve the job status.

For more information, see Custom models in the Amazon Bedrock User Guide.

", + "documentation":"

Creates a fine-tuning job to customize a base model.

You specify the base foundation model and the location of the training data. After the model-customization job completes successfully, your custom model resource will be ready to use. Amazon Bedrock returns validation loss metrics and output generations after the job completes.

For information on the format of training and validation data, see Prepare the datasets.

Model-customization jobs are asynchronous and the completion time depends on the base model and the training/validation data size. To monitor a job, use the GetModelCustomizationJob operation to retrieve the job status.

For more information, see Custom models in the Amazon Bedrock User Guide.

", "idempotent":true }, "CreateProvisionedModelThroughput":{ @@ -114,7 +133,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "documentation":"

Creates dedicated throughput for a base or custom model with the model units and for the duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "idempotent":true }, "DeleteCustomModel":{ @@ -134,7 +153,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "documentation":"

Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide.

", "idempotent":true }, "DeleteGuardrail":{ @@ -191,7 +210,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "documentation":"

Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "idempotent":true }, "GetCustomModel":{ @@ -210,7 +229,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Get the properties associated with a Amazon Bedrock custom model that you have created.For more information, see Custom models in the Amazon Bedrock User Guide.

" + "documentation":"

Get the properties associated with an Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide.

" }, "GetEvaluationJob":{ "name":"GetEvaluationJob", @@ -228,7 +247,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the properties associated with a model evaluation job, including the status of the job. For more information, see Model evaluations.

" + "documentation":"

Retrieves the properties associated with a model evaluation job, including the status of the job. For more information, see Model evaluation.

" }, "GetFoundationModel":{ "name":"GetFoundationModel", @@ -266,6 +285,24 @@ ], "documentation":"

Gets details about a guardrail. If you don't specify a version, the response returns details for the DRAFT version.

" }, + "GetModelCopyJob":{ + "name":"GetModelCopyJob", + "http":{ + "method":"GET", + "requestUri":"/model-copy-jobs/{jobArn}", + "responseCode":200 + }, + "input":{"shape":"GetModelCopyJobRequest"}, + "output":{"shape":"GetModelCopyJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a model copy job. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

" + }, "GetModelCustomizationJob":{ "name":"GetModelCustomizationJob", "http":{ @@ -282,7 +319,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the properties associated with a model-customization job, including the status of the job. For more information, see Custom models in the Amazon Bedrock User Guide.

" + "documentation":"

Retrieves the properties associated with a model-customization job, including the status of the job. For more information, see Custom models in the Amazon Bedrock User Guide.

" }, "GetModelInvocationLoggingConfiguration":{ "name":"GetModelInvocationLoggingConfiguration", @@ -316,7 +353,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

" + "documentation":"

Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

" }, "ListCustomModels":{ "name":"ListCustomModels", @@ -333,7 +370,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation.

For more information, see Custom models in the Amazon Bedrock User Guide.

" + "documentation":"

Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation.

For more information, see Custom models in the Amazon Bedrock User Guide.

" }, "ListEvaluationJobs":{ "name":"ListEvaluationJobs", @@ -367,7 +404,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.

" + "documentation":"

Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.

" }, "ListGuardrails":{ "name":"ListGuardrails", @@ -387,6 +424,24 @@ ], "documentation":"

Lists details about all the guardrails in an account. To list the DRAFT version of all your guardrails, don't specify the guardrailIdentifier field. To list all versions of a guardrail, specify the ARN of the guardrail in the guardrailIdentifier field.

You can set the maximum number of results to return in a response in the maxResults field. If there are more results than the number you set, the response returns a nextToken that you can send in another ListGuardrails request to see the next batch of results.

" }, + "ListModelCopyJobs":{ + "name":"ListModelCopyJobs", + "http":{ + "method":"GET", + "requestUri":"/model-copy-jobs", + "responseCode":200 + }, + "input":{"shape":"ListModelCopyJobsRequest"}, + "output":{"shape":"ListModelCopyJobsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.

" + }, "ListModelCustomizationJobs":{ "name":"ListModelCustomizationJobs", "http":{ @@ -402,7 +457,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria.

For more information, see Custom models in the Amazon Bedrock User Guide.

" + "documentation":"

Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria.

For more information, see Custom models in the Amazon Bedrock User Guide.

" }, "ListProvisionedModelThroughputs":{ "name":"ListProvisionedModelThroughputs", @@ -419,7 +474,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

" + "documentation":"

Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -437,7 +492,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

List the tags associated with the specified resource.

For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

List the tags associated with the specified resource.

For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "PutModelInvocationLoggingConfiguration":{ "name":"PutModelInvocationLoggingConfiguration", @@ -493,7 +548,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "documentation":"

Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.

", "idempotent":true }, "TagResource":{ @@ -513,7 +568,7 @@ {"shape":"TooManyTagsException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -531,7 +586,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "UpdateGuardrail":{ "name":"UpdateGuardrail", @@ -551,7 +606,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates a guardrail with the values you specify.

  • Specify a name and optional description.

  • Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

  • Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    • Give a name and description so that the guardrail can properly identify the topic.

    • Specify DENY in the type field.

    • (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

  • Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    • Specify the category in the type field.

    • Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

  • (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.

  • (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources.

", + "documentation":"

Updates a guardrail with the values you specify.

  • Specify a name and optional description.

  • Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

  • Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    • Give a name and description so that the guardrail can properly identify the topic.

    • Specify DENY in the type field.

    • (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

  • Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    • Specify the category in the type field.

    • Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

  • (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.

", "idempotent":true }, "UpdateProvisionedModelThroughput":{ @@ -570,7 +625,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "documentation":"

Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "idempotent":true } }, @@ -587,6 +642,10 @@ }, "exception":true }, + "AccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, "AutomatedEvaluationConfig":{ "type":"structure", "required":["datasetMetricConfigs"], @@ -757,6 +816,10 @@ "shape":"GuardrailSensitiveInformationPolicyConfig", "documentation":"

The sensitive information policy to configure for the guardrail.

" }, + "contextualGroundingPolicyConfig":{ + "shape":"GuardrailContextualGroundingPolicyConfig", + "documentation":"

The contextual grounding policy configuration used to create a guardrail.

" + }, "blockedInputMessaging":{ "shape":"GuardrailBlockedMessaging", "documentation":"

The message to return when the guardrail blocks a prompt.

" @@ -795,11 +858,11 @@ }, "guardrailArn":{ "shape":"GuardrailArn", - "documentation":"

The ARN of the guardrail that was created.

" + "documentation":"

The ARN of the guardrail.

" }, "version":{ "shape":"GuardrailDraftVersion", - "documentation":"

The version of the guardrail that was created. This value should be 1.

" + "documentation":"

The version of the guardrail that was created. This value will always be DRAFT.

" }, "createdAt":{ "shape":"Timestamp", @@ -813,7 +876,7 @@ "members":{ "guardrailIdentifier":{ "shape":"GuardrailIdentifier", - "documentation":"

The unique identifier of the guardrail.

", + "documentation":"

The unique identifier of the guardrail. This can be an ID or the ARN.

", "location":"uri", "locationName":"guardrailIdentifier" }, @@ -845,6 +908,46 @@ } } }, + "CreateModelCopyJobRequest":{ + "type":"structure", + "required":[ + "sourceModelArn", + "targetModelName" + ], + "members":{ + "sourceModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model to be copied.

" + }, + "targetModelName":{ + "shape":"CustomModelName", + "documentation":"

A name for the copied model.

" + }, + "modelKmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The ARN of the KMS key that you use to encrypt the model copy.

" + }, + "targetModelTags":{ + "shape":"TagList", + "documentation":"

Tags to associate with the target model. For more information, see Tag resources in the Amazon Bedrock User Guide.

" + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + } + } + }, + "CreateModelCopyJobResponse":{ + "type":"structure", + "required":["jobArn"], + "members":{ + "jobArn":{ + "shape":"ModelCopyJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the model copy job.

" + } + } + }, "CreateModelCustomizationJobRequest":{ "type":"structure", "required":[ @@ -941,7 +1044,7 @@ }, "modelUnits":{ "shape":"PositiveInteger", - "documentation":"

Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs.

For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide.

For more information about what an MU specifies, contact your Amazon Web Services account manager.

" + "documentation":"

Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. The throughput level of a model unit specifies the total number of input and output tokens that it can process and generate within a span of one minute. By default, your account has no model units for purchasing Provisioned Throughputs with commitment. You must first visit the Amazon Web Services support center to request MUs.

For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock User Guide.

For more information about what an MU specifies, contact your Amazon Web Services account manager.

" }, "provisionedModelName":{ "shape":"ProvisionedModelName", @@ -949,11 +1052,11 @@ }, "modelId":{ "shape":"ModelIdentifier", - "documentation":"

The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput. For a list of models for which you can purchase Provisioned Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the Amazon Bedrock User Guide.

" }, "commitmentDuration":{ "shape":"CommitmentDuration", - "documentation":"

The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field.

Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide

" + "documentation":"

The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field.

Custom models support all levels of commitment. To see which base models support no commitment, see Supported regions and models for Provisioned Throughput in the Amazon Bedrock User Guide

" }, "tags":{ "shape":"TagList", @@ -981,7 +1084,7 @@ "type":"string", "max":63, "min":1, - "pattern":"([0-9a-zA-Z][_-]?)+" + "pattern":"([0-9a-zA-Z][_-]?){1,63}" }, "CustomModelSummary":{ "type":"structure", @@ -1016,6 +1119,10 @@ "customizationType":{ "shape":"CustomizationType", "documentation":"

Specifies whether to carry out continued pre-training of a model or whether to fine-tune it. For more information, see Custom models.

" + }, + "ownerAccountId":{ + "shape":"AccountId", + "documentation":"

The unique identifier of the account that owns the model.

" } }, "documentation":"

Summary information for a custom model.

" @@ -1054,7 +1161,7 @@ "members":{ "guardrailIdentifier":{ "shape":"GuardrailIdentifier", - "documentation":"

The unique identifier of the guardrail.

", + "documentation":"

The unique identifier of the guardrail. This can be an ID or the ARN.

", "location":"uri", "locationName":"guardrailIdentifier" }, @@ -1723,7 +1830,7 @@ "members":{ "guardrailIdentifier":{ "shape":"GuardrailIdentifier", - "documentation":"

The unique identifier of the guardrail for which to get details.

", + "documentation":"

The unique identifier of the guardrail for which to get details. This can be an ID or the ARN.

", "location":"uri", "locationName":"guardrailIdentifier" }, @@ -1763,7 +1870,7 @@ }, "guardrailArn":{ "shape":"GuardrailArn", - "documentation":"

The ARN of the guardrail that was created.

" + "documentation":"

The ARN of the guardrail.

" }, "version":{ "shape":"GuardrailVersion", @@ -1789,6 +1896,10 @@ "shape":"GuardrailSensitiveInformationPolicy", "documentation":"

The sensitive information policy that was configured for the guardrail.

" }, + "contextualGroundingPolicy":{ + "shape":"GuardrailContextualGroundingPolicy", + "documentation":"

The contextual grounding policy used in the guardrail.

" + }, "createdAt":{ "shape":"Timestamp", "documentation":"

The date and time at which the guardrail was created.

" @@ -1819,6 +1930,75 @@ } } }, + "GetModelCopyJobRequest":{ + "type":"structure", + "required":["jobArn"], + "members":{ + "jobArn":{ + "shape":"ModelCopyJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the model copy job.

", + "location":"uri", + "locationName":"jobArn" + } + } + }, + "GetModelCopyJobResponse":{ + "type":"structure", + "required":[ + "jobArn", + "status", + "creationTime", + "targetModelArn", + "sourceAccountId", + "sourceModelArn" + ], + "members":{ + "jobArn":{ + "shape":"ModelCopyJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the model copy job.

" + }, + "status":{ + "shape":"ModelCopyJobStatus", + "documentation":"

The status of the model copy job.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the model copy job was created.

" + }, + "targetModelArn":{ + "shape":"CustomModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the copied model.

" + }, + "targetModelName":{ + "shape":"CustomModelName", + "documentation":"

The name of the copied model.

" + }, + "sourceAccountId":{ + "shape":"AccountId", + "documentation":"

The unique identifier of the account that the model being copied originated from.

" + }, + "sourceModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the original model being copied.

" + }, + "targetModelKmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key encrypting the copied model.

" + }, + "targetModelTags":{ + "shape":"TagList", + "documentation":"

The tags associated with the copied model.

" + }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

An error message for why the model copy job failed.

" + }, + "sourceModelName":{ + "shape":"CustomModelName", + "documentation":"

The name of the original model being copied.

" + } + } + }, "GetModelCustomizationJobRequest":{ "type":"structure", "required":["jobIdentifier"], @@ -2083,7 +2263,7 @@ "documentation":"

The strength of the content filter to apply to model responses. As you increase the filter strength, the likelihood of filtering harmful content increases and the probability of seeing harmful content in your application reduces.

" } }, - "documentation":"

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

  • Hate – Describes language or a statement that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin).

  • Insults – Describes language or a statement that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying.

  • Sexual – Describes language or a statement that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex.

  • Violence – Describes language or a statement that includes glorification of or threats to inflict physical pain, hurt, or injury toward a person, group or thing.

Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

For more information, see Guardrails content filters.

This data type is used in the following API operations:

" + "documentation":"

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

  • Hate – Describes language or a statement that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin).

  • Insults – Describes language or a statement that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying.

  • Sexual – Describes language or a statement that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex.

  • Violence – Describes language or a statement that includes glorification of or threats to inflict physical pain, hurt, or injury toward a person, group or thing.

Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

For more information, see Guardrails content filters.

" }, "GuardrailContentFilterType":{ "type":"string", @@ -2127,7 +2307,92 @@ "documentation":"

Contains the type of the content filter and how strongly it should apply to prompts and model responses.

" } }, - "documentation":"

Contains details about how to handle harmful content.

This data type is used in the following API operations:

" + "documentation":"

Contains details about how to handle harmful content.

" + }, + "GuardrailContextualGroundingFilter":{ + "type":"structure", + "required":[ + "type", + "threshold" + ], + "members":{ + "type":{ + "shape":"GuardrailContextualGroundingFilterType", + "documentation":"

The filter type details for the guardrails contextual grounding filter.

" + }, + "threshold":{ + "shape":"GuardrailContextualGroundingFilterThresholdDouble", + "documentation":"

The threshold details for the guardrails contextual grounding filter.

" + } + }, + "documentation":"

The details for the guardrails contextual grounding filter.

" + }, + "GuardrailContextualGroundingFilterConfig":{ + "type":"structure", + "required":[ + "type", + "threshold" + ], + "members":{ + "type":{ + "shape":"GuardrailContextualGroundingFilterType", + "documentation":"

The filter details for the guardrails contextual grounding filter.

" + }, + "threshold":{ + "shape":"GuardrailContextualGroundingFilterConfigThresholdDouble", + "documentation":"

The threshold details for the guardrails contextual grounding filter.

" + } + }, + "documentation":"

The filter configuration details for the guardrails contextual grounding filter.

" + }, + "GuardrailContextualGroundingFilterConfigThresholdDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "GuardrailContextualGroundingFilterThresholdDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "GuardrailContextualGroundingFilterType":{ + "type":"string", + "enum":[ + "GROUNDING", + "RELEVANCE" + ] + }, + "GuardrailContextualGroundingFilters":{ + "type":"list", + "member":{"shape":"GuardrailContextualGroundingFilter"}, + "min":1 + }, + "GuardrailContextualGroundingFiltersConfig":{ + "type":"list", + "member":{"shape":"GuardrailContextualGroundingFilterConfig"}, + "min":1 + }, + "GuardrailContextualGroundingPolicy":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"GuardrailContextualGroundingFilters", + "documentation":"

The filter details for the guardrails contextual grounding policy.

" + } + }, + "documentation":"

The details for the guardrails contextual grounding policy.

" + }, + "GuardrailContextualGroundingPolicyConfig":{ + "type":"structure", + "required":["filtersConfig"], + "members":{ + "filtersConfig":{ + "shape":"GuardrailContextualGroundingFiltersConfig", + "documentation":"

The filter configuration details for the guardrails contextual grounding policy.

" + } + }, + "documentation":"

The policy configuration details for the guardrails contextual grounding policy.

" }, "GuardrailDescription":{ "type":"string", @@ -2191,7 +2456,7 @@ "documentation":"

ManagedWords$type The managed word type that was configured for the guardrail. (For now, we only offer profanity word list)

" } }, - "documentation":"

The managed word list that was configured for the guardrail. (This is a list of words that are pre-defined and managed by Guardrails only.)

" + "documentation":"

The managed word list that was configured for the guardrail. (This is a list of words that are pre-defined and managed by guardrails only.)

" }, "GuardrailManagedWordsConfig":{ "type":"structure", @@ -2238,7 +2503,7 @@ "members":{ "type":{ "shape":"GuardrailPiiEntityType", - "documentation":"

The type of PII entity. For example, Social Security Number.

" + "documentation":"

The type of PII entity. For example, Social Security Number.

" }, "action":{ "shape":"GuardrailSensitiveInformationAction", @@ -2256,7 +2521,7 @@ "members":{ "type":{ "shape":"GuardrailPiiEntityType", - "documentation":"

Configure guardrail type when the PII entity is detected.

" + "documentation":"

Configure guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

  • General

    • ADDRESS

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.

    • AGE

An individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.

    • NAME

An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. Guardrails doesn't apply this entity type to names that are part of organizations or addresses. For example, Guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.

    • EMAIL

      An email address, such as marymajor@email.com.

    • PHONE

      A phone number. This entity type also includes fax and pager numbers.

    • USERNAME

      A user name that identifies an account, such as a login name, screen name, nick name, or handle.

    • PASSWORD

      An alphanumeric string that is used as a password, such as \"*very20special#pass*\".

    • DRIVER_ID

      The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.

    • LICENSE_PLATE

      A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.

    • VEHICLE_IDENTIFICATION_NUMBER

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs.

  • Finance

    • CREDIT_DEBIT_CARD_CVV

      A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.

    • CREDIT_DEBIT_CARD_EXPIRY

      The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. Guardrails recognizes expiration dates such as 01/21, 01/2021, and Jan 2021.

    • CREDIT_DEBIT_CARD_NUMBER

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.

    • PIN

      A four-digit personal identification number (PIN) with which you can access your bank account.

    • INTERNATIONAL_BANK_ACCOUNT_NUMBER

      An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure.

    • SWIFT_CODE

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.

  • IT

    • IP_ADDRESS

      An IPv4 address, such as 198.51.100.0.

    • MAC_ADDRESS

      A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC).

    • URL

      A web address, such as www.example.com.

    • AWS_ACCESS_KEY

      A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

    • AWS_SECRET_KEY

      A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

  • USA specific

    • US_BANK_ACCOUNT_NUMBER

      A US bank account number, which is typically 10 to 12 digits long.

    • US_BANK_ROUTING_NUMBER

      A US bank account routing number. These are typically nine digits long.

    • US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contains a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits.

    • US_PASSPORT_NUMBER

      A US passport number. Passport numbers range from six to nine alphanumeric characters.

    • US_SOCIAL_SECURITY_NUMBER

      A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.

  • Canada specific

    • CA_HEALTH_NUMBER

      A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.

    • CA_SOCIAL_INSURANCE_NUMBER

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.

      The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm.

  • UK Specific

    • UK_NATIONAL_HEALTH_SERVICE_NUMBER

      A UK National Health Service Number is a 10-17 digit number, such as 485 777 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.

    • UK_NATIONAL_INSURANCE_NUMBER

      A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.

      The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits.

    • UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.

  • Custom

    • Regex filter - You can use a regular expressions to define patterns for a guardrail to recognize and act upon such as serial number, booking ID etc..

" }, "action":{ "shape":"GuardrailSensitiveInformationAction", @@ -2557,7 +2822,7 @@ "documentation":"

Specifies to deny the topic.

" } }, - "documentation":"

Details about topics for the guardrail to identify and deny.

This data type is used in the following API operations:

" + "documentation":"

Details about topics for the guardrail to identify and deny.

" }, "GuardrailTopicDefinition":{ "type":"string", @@ -2604,7 +2869,7 @@ "documentation":"

A list of policies related to topics that the guardrail should deny.

" } }, - "documentation":"

Contains details about topics that the guardrail should identify and deny.

This data type is used in the following API operations:

" + "documentation":"

Contains details about topics that the guardrail should identify and deny.

" }, "GuardrailTopicType":{ "type":"string", @@ -2852,13 +3117,13 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of results to return in the response.

", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", "location":"querystring", "locationName":"maxResults" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Continuation token from the previous response, for Amazon Bedrock to list the next set of results.

", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -2873,6 +3138,12 @@ "documentation":"

The sort order of the results.

", "location":"querystring", "locationName":"sortOrder" + }, + "isOwned":{ + "shape":"Boolean", + "documentation":"

Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false).

", + "location":"querystring", + "locationName":"isOwned" } } }, @@ -2881,7 +3152,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

Continuation token for the next request to list the next set of results.

" + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" }, "modelSummaries":{ "shape":"CustomModelSummaryList", @@ -2966,7 +3237,7 @@ }, "byCustomizationType":{ "shape":"ModelCustomization", - "documentation":"

Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide.

", + "documentation":"

Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide.

", "location":"querystring", "locationName":"byCustomizationType" }, @@ -2978,7 +3249,7 @@ }, "byInferenceType":{ "shape":"InferenceType", - "documentation":"

Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", + "documentation":"

Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "location":"querystring", "locationName":"byInferenceType" } @@ -2998,7 +3269,7 @@ "members":{ "guardrailIdentifier":{ "shape":"GuardrailIdentifier", - "documentation":"

The unique identifier of the guardrail.

", + "documentation":"

The unique identifier of the guardrail. This can be an ID or the ARN.

", "location":"querystring", "locationName":"guardrailIdentifier" }, @@ -3030,6 +3301,84 @@ } } }, + "ListModelCopyJobsRequest":{ + "type":"structure", + "members":{ + "creationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters for model copy jobs created after the specified time.

", + "location":"querystring", + "locationName":"creationTimeAfter" + }, + "creationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters for model copy jobs created before the specified time.

", + "location":"querystring", + "locationName":"creationTimeBefore" + }, + "statusEquals":{ + "shape":"ModelCopyJobStatus", + "documentation":"

Filters for model copy jobs whose status matches the value that you specify.

", + "location":"querystring", + "locationName":"statusEquals" + }, + "sourceAccountEquals":{ + "shape":"AccountId", + "documentation":"

Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify.

", + "location":"querystring", + "locationName":"sourceAccountEquals" + }, + "sourceModelArnEquals":{ + "shape":"ModelArn", + "documentation":"

Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model is equal to the value that you specify.

", + "location":"querystring", + "locationName":"sourceModelArnEquals" + }, + "targetModelNameContains":{ + "shape":"CustomModelName", + "documentation":"

Filters for model copy jobs in which the name of the copied model contains the string that you specify.

", + "location":"querystring", + "locationName":"outputModelNameContains" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortBy":{ + "shape":"SortJobsBy", + "documentation":"

The field to sort by in the returned list of model copy jobs.

", + "location":"querystring", + "locationName":"sortBy" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

Specifies whether to sort the results in ascending or descending order.

", + "location":"querystring", + "locationName":"sortOrder" + } + } + }, + "ListModelCopyJobsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + }, + "modelCopyJobSummaries":{ + "shape":"ModelCopyJobSummaries", + "documentation":"

A list of information about each model copy job.

" + } + } + }, "ListModelCustomizationJobsRequest":{ "type":"structure", "members":{ @@ -3059,13 +3408,13 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of results to return in the response.

", + "documentation":"

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", "location":"querystring", "locationName":"maxResults" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Continuation token from the previous response, for Amazon Bedrock to list the next set of results.

", + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -3088,7 +3437,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

Page continuation token to use in the next request.

" + "documentation":"

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" }, "modelCustomizationJobSummaries":{ "shape":"ModelCustomizationJobSummaries", @@ -3234,6 +3583,82 @@ "min":20, "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))" }, + "ModelCopyJobArn":{ + "type":"string", + "max":1011, + "min":0, + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:model-copy-job/[a-z0-9]{12}" + }, + "ModelCopyJobStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Completed", + "Failed" + ] + }, + "ModelCopyJobSummaries":{ + "type":"list", + "member":{"shape":"ModelCopyJobSummary"} + }, + "ModelCopyJobSummary":{ + "type":"structure", + "required":[ + "jobArn", + "status", + "creationTime", + "targetModelArn", + "sourceAccountId", + "sourceModelArn" + ], + "members":{ + "jobArn":{ + "shape":"ModelCopyJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the model copy job.

" + }, + "status":{ + "shape":"ModelCopyJobStatus", + "documentation":"

The status of the model copy job.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the model copy job was created.

" + }, + "targetModelArn":{ + "shape":"CustomModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the copied model.

" + }, + "targetModelName":{ + "shape":"CustomModelName", + "documentation":"

The name of the copied model.

" + }, + "sourceAccountId":{ + "shape":"AccountId", + "documentation":"

The unique identifier of the account that the model being copied originated from.

" + }, + "sourceModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the original model being copied.

" + }, + "targetModelKmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the copied model.

" + }, + "targetModelTags":{ + "shape":"TagList", + "documentation":"

Tags associated with the copied model.

" + }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

If a model fails to be copied, a message describing why the job failed is included here.

" + }, + "sourceModelName":{ + "shape":"CustomModelName", + "documentation":"

The name of the original model being copied.

" + } + }, + "documentation":"

Contains details about each model copy job.

This data type is used in the following API operations:

" + }, "ModelCustomization":{ "type":"string", "enum":[ @@ -3782,7 +4207,7 @@ "members":{ "guardrailIdentifier":{ "shape":"GuardrailIdentifier", - "documentation":"

The unique identifier of the guardrail

", + "documentation":"

The unique identifier of the guardrail. This can be an ID or the ARN.

", "location":"uri", "locationName":"guardrailIdentifier" }, @@ -3810,6 +4235,10 @@ "shape":"GuardrailSensitiveInformationPolicyConfig", "documentation":"

The sensitive information policy to configure for the guardrail.

" }, + "contextualGroundingPolicyConfig":{ + "shape":"GuardrailContextualGroundingPolicyConfig", + "documentation":"

The contextual grounding policy configuration used to update a guardrail.

" + }, "blockedInputMessaging":{ "shape":"GuardrailBlockedMessaging", "documentation":"

The message to return when the guardrail blocks a prompt.

" @@ -3839,7 +4268,7 @@ }, "guardrailArn":{ "shape":"GuardrailArn", - "documentation":"

The ARN of the guardrail that was created.

" + "documentation":"

The ARN of the guardrail.

" }, "version":{ "shape":"GuardrailDraftVersion", diff --git a/botocore/data/chatbot/2017-10-11/service-2.json b/botocore/data/chatbot/2017-10-11/service-2.json index 0dd16cbcf2..4cc327f4b6 100644 --- a/botocore/data/chatbot/2017-10-11/service-2.json +++ b/botocore/data/chatbot/2017-10-11/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"chatbot", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Chatbot", "serviceId":"chatbot", "signatureVersion":"v4", @@ -321,6 +322,55 @@ ], "documentation":"Lists all Microsoft Teams user identities with a mapped role." }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/list-tags-for-resource", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Retrieves the list of tags applied to a configuration." + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tag-resource", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"Applies the supplied tags to a configuration." 
+ }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/untag-resource", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Removes the supplied tags from a configuration" + }, "UpdateAccountPreferences":{ "name":"UpdateAccountPreferences", "http":{ @@ -404,6 +454,12 @@ }, "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, "Arn":{ "type":"string", "max":1224, @@ -455,6 +511,10 @@ "LoggingLevel":{ "shape":"CustomerCwLogLevel", "documentation":"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.Logging levels include ERROR, INFO, or NONE." + }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags applied to the configuration." } }, "documentation":"An AWS Chatbot configuration for Amazon Chime." @@ -557,6 +617,10 @@ "LoggingLevel":{ "shape":"CustomerCwLogLevel", "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags to apply to the configuration." } } }, @@ -622,6 +686,10 @@ "UserAuthorizationRequired":{ "shape":"BooleanAccountPreference", "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags to apply to the configuration." } } }, @@ -696,6 +764,10 @@ "UserAuthorizationRequired":{ "shape":"BooleanAccountPreference", "documentation":"Enables use of a user role requirement in your chat configuration." 
+ }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags to apply to the configuration." } } }, @@ -1120,6 +1192,14 @@ "type":"list", "member":{"shape":"GuardrailPolicyArn"} }, + "InternalServiceError":{ + "type":"structure", + "members":{ + }, + "documentation":"Customer/consumer-facing internal service exception. https://w.amazon.com/index.php/AWS/API_Standards/Exceptions#InternalServiceError", + "error":{"httpStatusCode":500}, + "exception":true + }, "InvalidParameterException":{ "type":"structure", "members":{ @@ -1218,6 +1298,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"The ARN of the configuration." + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"A list of tags applied to the configuration." + } + } + }, "ListTeamsChannelConfigurationsException":{ "type":"structure", "members":{ @@ -1279,6 +1378,14 @@ "error":{"httpStatusCode":404}, "exception":true }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":429}, + "exception":true + }, "SlackChannelConfiguration":{ "type":"structure", "required":[ @@ -1335,6 +1442,10 @@ "UserAuthorizationRequired":{ "shape":"BooleanAccountPreference", "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags applied to the configuration." } }, "documentation":"An AWS Chatbot configuration for Slack." @@ -1434,6 +1545,72 @@ "member":{"shape":"Arn"} }, "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "TagKey", + "TagValue" + ], + "members":{ + "TagKey":{ + "shape":"TagKey", + "documentation":"The tag key." 
+ }, + "TagValue":{ + "shape":"TagValue", + "documentation":"The tag value." + } + }, + "documentation":"A tag applied to the configuration." + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"The ARN of the configuration." + }, + "Tags":{ + "shape":"TagList", + "documentation":"A list of tags to apply to the configuration." + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, "TeamChannelConfigurationsList":{ "type":"list", "member":{"shape":"TeamsChannelConfiguration"} @@ -1503,6 +1680,10 @@ "UserAuthorizationRequired":{ "shape":"BooleanAccountPreference", "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "Tags":{ + "shape":"Tags", + "documentation":"A list of tags applied to the configuration." } }, "documentation":"An AWS Chatbot configuration for Microsoft Teams." @@ -1562,12 +1743,42 @@ }, "documentation":"Identifes a user level permission for a channel configuration." }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + }, + "documentation":"The supplied list of tags contains too many tags.", + "error":{"httpStatusCode":400}, + "exception":true + }, "UUID":{ "type":"string", "max":36, "min":36, "pattern":"^[0-9A-Fa-f]{8}(?:-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$" }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"The ARN of the configuration." 
+ }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"A list of tag keys to remove from the configuration." + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateAccountPreferencesException":{ "type":"structure", "members":{ diff --git a/botocore/data/chime-sdk-media-pipelines/2021-07-15/endpoint-rule-set-1.json b/botocore/data/chime-sdk-media-pipelines/2021-07-15/endpoint-rule-set-1.json index a451babf8a..c82bcc16b6 100644 --- a/botocore/data/chime-sdk-media-pipelines/2021-07-15/endpoint-rule-set-1.json +++ b/botocore/data/chime-sdk-media-pipelines/2021-07-15/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { 
"conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/chime-sdk-media-pipelines/2021-07-15/service-2.json b/botocore/data/chime-sdk-media-pipelines/2021-07-15/service-2.json index e61a6b93bc..c4e1dbd728 100644 --- a/botocore/data/chime-sdk-media-pipelines/2021-07-15/service-2.json +++ b/botocore/data/chime-sdk-media-pipelines/2021-07-15/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2021-07-15", "endpointPrefix":"media-pipelines-chime", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Chime SDK Media Pipelines", "serviceId":"Chime SDK Media Pipelines", "signatureVersion":"v4", "signingName":"chime", - "uid":"chime-sdk-media-pipelines-2021-07-15" + "uid":"chime-sdk-media-pipelines-2021-07-15", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateMediaCapturePipeline":{ @@ -132,7 +134,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an Kinesis video stream pool for the media pipeline.

" + "documentation":"

Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines.

If a meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS stream can be in any available Region, including an opt-in Region. For example, if the meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or any other Region that the Amazon Chime SDK supports.

To learn which AWS Region a meeting uses, call the GetMeeting API and use the MediaRegion parameter from the response.

For more information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account Management Reference Guide.

" }, "CreateMediaStreamPipeline":{ "name":"CreateMediaStreamPipeline", @@ -232,7 +234,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes an Kinesis video stream pool.

" + "documentation":"

Deletes an Amazon Kinesis Video Stream pool.

" }, "GetMediaCapturePipeline":{ "name":"GetMediaCapturePipeline", @@ -636,7 +638,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates an Kinesis video stream pool in a media pipeline.

" + "documentation":"

Updates an Amazon Kinesis Video Stream pool in a media pipeline.

" } }, "shapes":{ @@ -779,6 +781,10 @@ "shape":"Boolean", "documentation":"

Turns language identification on or off.

" }, + "IdentifyMultipleLanguages":{ + "shape":"Boolean", + "documentation":"

Turns language identification on or off for multiple languages.

" + }, "LanguageOptions":{ "shape":"LanguageOptions", "documentation":"

The language options for the transcription, such as automatic language detection.

" @@ -796,7 +802,7 @@ "documentation":"

The names of the custom vocabulary filter or filters using during transcription.

" } }, - "documentation":"

A structure that contains the configuration settings for an Amazon Transcribe processor.

" + "documentation":"

A structure that contains the configuration settings for an Amazon Transcribe processor.

Calls to this API must include a LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages parameter. If you include more than one of those parameters, your transcription job fails.

" }, "Arn":{ "type":"string", @@ -1465,11 +1471,11 @@ "members":{ "StreamConfiguration":{ "shape":"KinesisVideoStreamConfiguration", - "documentation":"

The configuration settings for the video stream.

" + "documentation":"

The configuration settings for the stream.

" }, "PoolName":{ "shape":"KinesisVideoStreamPoolName", - "documentation":"

The name of the video stream pool.

" + "documentation":"

The name of the pool.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -1478,7 +1484,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags assigned to the video stream pool.

" + "documentation":"

The tags assigned to the stream pool.

" } } }, @@ -1487,7 +1493,7 @@ "members":{ "KinesisVideoStreamPoolConfiguration":{ "shape":"KinesisVideoStreamPoolConfiguration", - "documentation":"

The configuration for the Kinesis video stream pool.

" + "documentation":"

The configuration for applying the streams to the pool.

" } } }, @@ -1575,7 +1581,7 @@ "members":{ "Identifier":{ "shape":"NonEmptyString", - "documentation":"

The ID of the pool being deleted.

", + "documentation":"

The unique identifier of the requested resource. Valid values include the name and ARN of the media pipeline Kinesis Video Stream pool.

", "location":"uri", "locationName":"identifier" } @@ -1709,7 +1715,7 @@ "members":{ "Identifier":{ "shape":"NonEmptyString", - "documentation":"

The ID of the video stream pool.

", + "documentation":"

The unique identifier of the requested resource. Valid values include the name and ARN of the media pipeline Kinesis Video Stream pool.

", "location":"uri", "locationName":"identifier" } @@ -1964,7 +1970,7 @@ "documentation":"

The amount of time that data is retained.

" } }, - "documentation":"

The configuration of an Kinesis video stream.

" + "documentation":"

The configuration of a Kinesis video stream.

If a meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS stream can be in any available Region, including an opt-in Region. For example, if the meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or any other Region that the Amazon Chime SDK supports.

To learn which AWS Region a meeting uses, call the GetMeeting API and use the MediaRegion parameter from the response.

For more information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account Management Reference Guide.

" }, "KinesisVideoStreamConfigurationUpdate":{ "type":"structure", @@ -2872,7 +2878,7 @@ "members":{ "SinkArn":{ "shape":"Arn", - "documentation":"

The ARN of the media stream sink.

" + "documentation":"

The ARN of the Kinesis Video Stream pool returned by the CreateMediaPipelineKinesisVideoStreamPool API.

" }, "SinkType":{ "shape":"MediaStreamPipelineSinkType", @@ -2908,7 +2914,7 @@ }, "SourceArn":{ "shape":"Arn", - "documentation":"

The ARN of the media stream source.

" + "documentation":"

The ARN of the meeting.

" } }, "documentation":"

Structure that contains the settings for media stream sources.

" @@ -3104,7 +3110,7 @@ "type":"list", "member":{"shape":"RecordingStreamConfiguration"}, "max":2, - "min":2 + "min":1 }, "ReservedStreamCapacity":{ "type":"integer", @@ -3704,7 +3710,7 @@ "members":{ "Identifier":{ "shape":"NonEmptyString", - "documentation":"

The ID of the video stream pool.

", + "documentation":"

The unique identifier of the requested resource. Valid values include the name and ARN of the media pipeline Kinesis Video Stream pool.

", "location":"uri", "locationName":"identifier" }, diff --git a/botocore/data/cleanrooms/2022-02-17/paginators-1.json b/botocore/data/cleanrooms/2022-02-17/paginators-1.json index 1bbb0a6cfd..419570b326 100644 --- a/botocore/data/cleanrooms/2022-02-17/paginators-1.json +++ b/botocore/data/cleanrooms/2022-02-17/paginators-1.json @@ -89,6 +89,24 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "privacyBudgetSummaries" + }, + "ListCollaborationIdNamespaceAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "collaborationIdNamespaceAssociationSummaries" + }, + "ListIdMappingTables": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "idMappingTableSummaries" + }, + "ListIdNamespaceAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "idNamespaceAssociationSummaries" } } } diff --git a/botocore/data/cleanrooms/2022-02-17/service-2.json b/botocore/data/cleanrooms/2022-02-17/service-2.json index 42c9b6b09f..b427d20b03 100644 --- a/botocore/data/cleanrooms/2022-02-17/service-2.json +++ b/botocore/data/cleanrooms/2022-02-17/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-02-17", + "auth":["aws.auth#sigv4"], "endpointPrefix":"cleanrooms", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Clean Rooms Service", "serviceId":"CleanRooms", "signatureVersion":"v4", @@ -185,6 +186,66 @@ ], "documentation":"

Creates a configured table association. A configured table association links a configured table with a collaboration.

" }, + "CreateConfiguredTableAssociationAnalysisRule":{ + "name":"CreateConfiguredTableAssociationAnalysisRule", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule", + "responseCode":200 + }, + "input":{"shape":"CreateConfiguredTableAssociationAnalysisRuleInput"}, + "output":{"shape":"CreateConfiguredTableAssociationAnalysisRuleOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a new analysis rule for an associated configured table.

", + "idempotent":true + }, + "CreateIdMappingTable":{ + "name":"CreateIdMappingTable", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables", + "responseCode":200 + }, + "input":{"shape":"CreateIdMappingTableInput"}, + "output":{"shape":"CreateIdMappingTableOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an ID mapping table.

" + }, + "CreateIdNamespaceAssociation":{ + "name":"CreateIdNamespaceAssociation", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/idnamespaceassociations", + "responseCode":200 + }, + "input":{"shape":"CreateIdNamespaceAssociationInput"}, + "output":{"shape":"CreateIdNamespaceAssociationOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an ID namespace association.

" + }, "CreateMembership":{ "name":"CreateMembership", "http":{ @@ -340,6 +401,64 @@ "documentation":"

Deletes a configured table association.

", "idempotent":true }, + "DeleteConfiguredTableAssociationAnalysisRule":{ + "name":"DeleteConfiguredTableAssociationAnalysisRule", + "http":{ + "method":"DELETE", + "requestUri":"/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + "responseCode":204 + }, + "input":{"shape":"DeleteConfiguredTableAssociationAnalysisRuleInput"}, + "output":{"shape":"DeleteConfiguredTableAssociationAnalysisRuleOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an analysis rule for a configured table association.

", + "idempotent":true + }, + "DeleteIdMappingTable":{ + "name":"DeleteIdMappingTable", + "http":{ + "method":"DELETE", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteIdMappingTableInput"}, + "output":{"shape":"DeleteIdMappingTableOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an ID mapping table.

", + "idempotent":true + }, + "DeleteIdNamespaceAssociation":{ + "name":"DeleteIdNamespaceAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteIdNamespaceAssociationInput"}, + "output":{"shape":"DeleteIdNamespaceAssociationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an ID namespace association.

", + "idempotent":true + }, "DeleteMember":{ "name":"DeleteMember", "http":{ @@ -470,6 +589,24 @@ ], "documentation":"

Retrieves a configured audience model association within a collaboration.

" }, + "GetCollaborationIdNamespaceAssociation":{ + "name":"GetCollaborationIdNamespaceAssociation", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetCollaborationIdNamespaceAssociationInput"}, + "output":{"shape":"GetCollaborationIdNamespaceAssociationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves an ID namespace association from a specific collaboration.

" + }, "GetCollaborationPrivacyBudgetTemplate":{ "name":"GetCollaborationPrivacyBudgetTemplate", "http":{ @@ -560,6 +697,60 @@ ], "documentation":"

Retrieves a configured table association.

" }, + "GetConfiguredTableAssociationAnalysisRule":{ + "name":"GetConfiguredTableAssociationAnalysisRule", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + "responseCode":200 + }, + "input":{"shape":"GetConfiguredTableAssociationAnalysisRuleInput"}, + "output":{"shape":"GetConfiguredTableAssociationAnalysisRuleOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the analysis rule for a configured table association.

" + }, + "GetIdMappingTable":{ + "name":"GetIdMappingTable", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetIdMappingTableInput"}, + "output":{"shape":"GetIdMappingTableOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves an ID mapping table.

" + }, + "GetIdNamespaceAssociation":{ + "name":"GetIdNamespaceAssociation", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetIdNamespaceAssociationInput"}, + "output":{"shape":"GetIdNamespaceAssociationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves an ID namespace association.

" + }, "GetMembership":{ "name":"GetMembership", "http":{ @@ -704,6 +895,24 @@ ], "documentation":"

Lists configured audience model associations within a collaboration.

" }, + "ListCollaborationIdNamespaceAssociations":{ + "name":"ListCollaborationIdNamespaceAssociations", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/idnamespaceassociations", + "responseCode":200 + }, + "input":{"shape":"ListCollaborationIdNamespaceAssociationsInput"}, + "output":{"shape":"ListCollaborationIdNamespaceAssociationsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns a list of the ID namespace associations in a collaboration.

" + }, "ListCollaborationPrivacyBudgetTemplates":{ "name":"ListCollaborationPrivacyBudgetTemplates", "http":{ @@ -810,6 +1019,42 @@ ], "documentation":"

Lists configured tables.

" }, + "ListIdMappingTables":{ + "name":"ListIdMappingTables", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables", + "responseCode":200 + }, + "input":{"shape":"ListIdMappingTablesInput"}, + "output":{"shape":"ListIdMappingTablesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns a list of ID mapping tables.

" + }, + "ListIdNamespaceAssociations":{ + "name":"ListIdNamespaceAssociations", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/idnamespaceassociations", + "responseCode":200 + }, + "input":{"shape":"ListIdNamespaceAssociationsInput"}, + "output":{"shape":"ListIdNamespaceAssociationsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns a list of ID namespace associations.

" + }, "ListMembers":{ "name":"ListMembers", "http":{ @@ -932,6 +1177,25 @@ ], "documentation":"

Lists all of the tags that have been added to a resource.

" }, + "PopulateIdMappingTable":{ + "name":"PopulateIdMappingTable", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}/populate", + "responseCode":200 + }, + "input":{"shape":"PopulateIdMappingTableInput"}, + "output":{"shape":"PopulateIdMappingTableOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Defines the information that's necessary to populate an ID mapping table.

" + }, "PreviewPrivacyImpact":{ "name":"PreviewPrivacyImpact", "http":{ @@ -1109,6 +1373,61 @@ ], "documentation":"

Updates a configured table association.

" }, + "UpdateConfiguredTableAssociationAnalysisRule":{ + "name":"UpdateConfiguredTableAssociationAnalysisRule", + "http":{ + "method":"PATCH", + "requestUri":"/memberships/{membershipIdentifier}/configuredTableAssociations/{configuredTableAssociationIdentifier}/analysisRule/{analysisRuleType}", + "responseCode":200 + }, + "input":{"shape":"UpdateConfiguredTableAssociationAnalysisRuleInput"}, + "output":{"shape":"UpdateConfiguredTableAssociationAnalysisRuleOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates the analysis rule for a configured table association.

" + }, + "UpdateIdMappingTable":{ + "name":"UpdateIdMappingTable", + "http":{ + "method":"PATCH", + "requestUri":"/memberships/{membershipIdentifier}/idmappingtables/{idMappingTableIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateIdMappingTableInput"}, + "output":{"shape":"UpdateIdMappingTableOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Provides the details that are necessary to update an ID mapping table.

" + }, + "UpdateIdNamespaceAssociation":{ + "name":"UpdateIdNamespaceAssociation", + "http":{ + "method":"PATCH", + "requestUri":"/memberships/{membershipIdentifier}/idnamespaceassociations/{idNamespaceAssociationIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateIdNamespaceAssociationInput"}, + "output":{"shape":"UpdateIdNamespaceAssociationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Provides the details that are necessary to update an ID namespace association.

" + }, "UpdateMembership":{ "name":"UpdateMembership", "http":{ @@ -1195,6 +1514,20 @@ "min":12, "pattern":"\\d+" }, + "AdditionalAnalyses":{ + "type":"string", + "enum":[ + "ALLOWED", + "REQUIRED", + "NOT_ALLOWED" + ] + }, + "AdditionalAnalysesResourceArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:([\\d]{12}|\\*):membership/[\\*\\d\\w-]+/configuredaudiencemodelassociation/[\\*\\d\\w-]+" + }, "AggregateColumn":{ "type":"structure", "required":[ @@ -1266,12 +1599,22 @@ "type":"string", "enum":["COUNT_DISTINCT"] }, + "AllowedAdditionalAnalyses":{ + "type":"list", + "member":{"shape":"AdditionalAnalysesResourceArn"}, + "max":25, + "min":0 + }, "AllowedColumnList":{ "type":"list", "member":{"shape":"ColumnName"}, "max":225, "min":1 }, + "AllowedResultReceivers":{ + "type":"list", + "member":{"shape":"AccountId"} + }, "AnalysisFormat":{ "type":"string", "enum":["SQL"] @@ -1384,6 +1727,10 @@ "outputConstraints":{ "shape":"AggregationConstraints", "documentation":"

Columns that must meet a specific threshold value (after an aggregation function is applied to it) for each output row to be returned.

" + }, + "additionalAnalyses":{ + "shape":"AdditionalAnalyses", + "documentation":"

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

The additionalAnalyses parameter is currently supported for the list analysis rule (AnalysisRuleList) and the custom analysis rule (AnalysisRuleCustom).

" } }, "documentation":"

A type of analysis rule that enables query structure and specified queries that produce aggregate statistics.

" @@ -1415,6 +1762,14 @@ "shape":"AnalysisRuleCustomAllowedAnalysisProvidersList", "documentation":"

The IDs of the Amazon Web Services accounts that are allowed to query by the custom analysis rule. Required when allowedAnalyses is ANY_QUERY.

" }, + "additionalAnalyses":{ + "shape":"AdditionalAnalyses", + "documentation":"

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

" + }, + "disallowedOutputColumns":{ + "shape":"AnalysisRuleColumnList", + "documentation":"

A list of columns that aren't allowed to be shown in the query output.

" + }, "differentialPrivacy":{ "shape":"DifferentialPrivacyConfiguration", "documentation":"

The differential privacy configuration.

" @@ -1432,6 +1787,34 @@ "member":{"shape":"AccountId"}, "min":0 }, + "AnalysisRuleIdMappingTable":{ + "type":"structure", + "required":[ + "joinColumns", + "queryConstraints" + ], + "members":{ + "joinColumns":{ + "shape":"AnalysisRuleIdMappingTableJoinColumnsList", + "documentation":"

The columns that query runners are allowed to use in an INNER JOIN statement.

" + }, + "queryConstraints":{ + "shape":"QueryConstraintList", + "documentation":"

The query constraints of the analysis rule ID mapping table.

" + }, + "dimensionColumns":{ + "shape":"AnalysisRuleColumnList", + "documentation":"

The columns that query runners are allowed to select, group by, or filter by.

" + } + }, + "documentation":"

Defines details for the analysis rule ID mapping table.

" + }, + "AnalysisRuleIdMappingTableJoinColumnsList":{ + "type":"list", + "member":{"shape":"AnalysisRuleColumnName"}, + "max":2, + "min":2 + }, "AnalysisRuleList":{ "type":"structure", "required":[ @@ -1450,6 +1833,10 @@ "listColumns":{ "shape":"AnalysisRuleColumnList", "documentation":"

Columns that can be listed in the output.

" + }, + "additionalAnalyses":{ + "shape":"AdditionalAnalyses", + "documentation":"

An indicator as to whether additional analyses (such as Clean Rooms ML) can be applied to the output of the direct query.

" } }, "documentation":"

A type of analysis rule that enables row-level analysis.

" @@ -1484,6 +1871,10 @@ "custom":{ "shape":"AnalysisRuleCustom", "documentation":"

Analysis rule type that enables custom SQL queries on a configured table.

" + }, + "idMappingTable":{ + "shape":"AnalysisRuleIdMappingTable", + "documentation":"

The ID mapping table.

" } }, "documentation":"

Controls on the query specifications that can be run on configured table.

", @@ -1494,7 +1885,8 @@ "enum":[ "AGGREGATION", "LIST", - "CUSTOM" + "CUSTOM", + "ID_MAPPING_TABLE" ] }, "AnalysisRuleTypeList":{ @@ -1746,6 +2138,13 @@ "type":"string", "enum":["DIFFERENTIAL_PRIVACY"] }, + "AnalysisType":{ + "type":"string", + "enum":[ + "DIRECT_ANALYSIS", + "ADDITIONAL_ANALYSIS" + ] + }, "BatchGetCollaborationAnalysisTemplateError":{ "type":"structure", "required":[ @@ -2287,6 +2686,135 @@ "min":1, "pattern":"(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*" }, + "CollaborationIdNamespaceAssociation":{ + "type":"structure", + "required":[ + "id", + "arn", + "collaborationId", + "collaborationArn", + "name", + "creatorAccountId", + "createTime", + "updateTime", + "inputReferenceConfig", + "inputReferenceProperties" + ], + "members":{ + "id":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the collaboration ID namespace association.

" + }, + "arn":{ + "shape":"IdNamespaceAssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration ID namespace association.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains the collaboration ID namespace association.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains the collaboration ID namespace association.

" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

The name of the collaboration ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the collaboration ID namespace association.

" + }, + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"

The unique identifier of the Amazon Web Services account that created the collaboration ID namespace association.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the collaboration ID namespace association was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which the collaboration ID namespace association was updated.

" + }, + "inputReferenceConfig":{ + "shape":"IdNamespaceAssociationInputReferenceConfig", + "documentation":"

The input reference configuration that's necessary to create the collaboration ID namespace association.

" + }, + "inputReferenceProperties":{ + "shape":"IdNamespaceAssociationInputReferenceProperties", + "documentation":"

The input reference properties that are needed to create the collaboration ID namespace association.

" + }, + "idMappingConfig":{"shape":"IdMappingConfig"} + }, + "documentation":"

Defines details for the collaboration ID namespace association.

" + }, + "CollaborationIdNamespaceAssociationSummary":{ + "type":"structure", + "required":[ + "arn", + "createTime", + "id", + "updateTime", + "collaborationArn", + "collaborationId", + "creatorAccountId", + "inputReferenceConfig", + "name", + "inputReferenceProperties" + ], + "members":{ + "arn":{ + "shape":"IdNamespaceAssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration ID namespace association.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the collaboration ID namespace association was created.

" + }, + "id":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the collaboration ID namespace association.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which the collaboration ID namespace association was updated.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains this collaboration ID namespace association.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains this collaboration ID namespace association.

" + }, + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"

The Amazon Web Services account that created this collaboration ID namespace association.

" + }, + "inputReferenceConfig":{ + "shape":"IdNamespaceAssociationInputReferenceConfig", + "documentation":"

The input reference configuration that's used to create the collaboration ID namespace association.

" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

The name of the collaboration ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the collaboration ID namespace association.

" + }, + "inputReferenceProperties":{ + "shape":"IdNamespaceAssociationInputReferencePropertiesSummary", + "documentation":"

The input reference properties that are used to create the collaboration ID namespace association.

" + } + }, + "documentation":"

Provides summary information about the collaboration ID namespace association.

" + }, + "CollaborationIdNamespaceAssociationSummaryList":{ + "type":"list", + "member":{"shape":"CollaborationIdNamespaceAssociationSummary"} + }, "CollaborationIdentifier":{ "type":"string", "max":36, @@ -2572,6 +3100,17 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*" }, + "ConfigurationDetails":{ + "type":"structure", + "members":{ + "directAnalysisConfigurationDetails":{ + "shape":"DirectAnalysisConfigurationDetails", + "documentation":"

The direct analysis configuration details.

" + } + }, + "documentation":"

The configuration details.

", + "union":true + }, "ConfiguredAudienceModelArn":{ "type":"string", "max":2048, @@ -2920,6 +3459,10 @@ "shape":"TableDescription", "documentation":"

A description of the configured table association.

" }, + "analysisRuleTypes":{ + "shape":"ConfiguredTableAssociationAnalysisRuleTypeList", + "documentation":"

The analysis rule types for the configured table association.

" + }, "createTime":{ "shape":"Timestamp", "documentation":"

The time the configured table association was created.

" @@ -2931,6 +3474,133 @@ }, "documentation":"

A configured table association links a configured table to a collaboration.

" }, + "ConfiguredTableAssociationAnalysisRule":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "configuredTableAssociationId", + "configuredTableAssociationArn", + "policy", + "type", + "createTime", + "updateTime" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The membership identifier for the configured table association analysis rule.

" + }, + "configuredTableAssociationId":{ + "shape":"ConfiguredTableAssociationIdentifier", + "documentation":"

The unique identifier for the configured table association.

" + }, + "configuredTableAssociationArn":{ + "shape":"ConfiguredTableAssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the configured table association.

" + }, + "policy":{ + "shape":"ConfiguredTableAssociationAnalysisRulePolicy", + "documentation":"

The policy of the configured table association analysis rule.

" + }, + "type":{ + "shape":"ConfiguredTableAssociationAnalysisRuleType", + "documentation":"

The type of the configured table association analysis rule.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the configured table association analysis rule.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The update time of the configured table association analysis rule.

" + } + }, + "documentation":"

An analysis rule for a configured table association. This analysis rule specifies how data from the table can be used within its associated collaboration. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule.

" + }, + "ConfiguredTableAssociationAnalysisRuleAggregation":{ + "type":"structure", + "members":{ + "allowedResultReceivers":{ + "shape":"AllowedResultReceivers", + "documentation":"

The list of collaboration members who are allowed to receive results of queries run with this configured table.

" + }, + "allowedAdditionalAnalyses":{ + "shape":"AllowedAdditionalAnalyses", + "documentation":"

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

The allowedAdditionalAnalyses parameter is currently supported for the list analysis rule (AnalysisRuleList) and the custom analysis rule (AnalysisRuleCustom).

" + } + }, + "documentation":"

The configured table association analysis rule applied to a configured table with the aggregation analysis rule.

" + }, + "ConfiguredTableAssociationAnalysisRuleCustom":{ + "type":"structure", + "members":{ + "allowedResultReceivers":{ + "shape":"AllowedResultReceivers", + "documentation":"

The list of collaboration members who are allowed to receive results of queries run with this configured table.

" + }, + "allowedAdditionalAnalyses":{ + "shape":"AllowedAdditionalAnalyses", + "documentation":"

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

" + } + }, + "documentation":"

The configured table association analysis rule applied to a configured table with the custom analysis rule.

" + }, + "ConfiguredTableAssociationAnalysisRuleList":{ + "type":"structure", + "members":{ + "allowedResultReceivers":{ + "shape":"AllowedResultReceivers", + "documentation":"

The list of collaboration members who are allowed to receive results of queries run with this configured table.

" + }, + "allowedAdditionalAnalyses":{ + "shape":"AllowedAdditionalAnalyses", + "documentation":"

The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.

" + } + }, + "documentation":"

The configured table association analysis rule applied to a configured table with the list analysis rule.

" + }, + "ConfiguredTableAssociationAnalysisRulePolicy":{ + "type":"structure", + "members":{ + "v1":{ + "shape":"ConfiguredTableAssociationAnalysisRulePolicyV1", + "documentation":"

The policy for the configured table association analysis rule.

" + } + }, + "documentation":"

Controls on the query specifications that can be run on an associated configured table.

", + "union":true + }, + "ConfiguredTableAssociationAnalysisRulePolicyV1":{ + "type":"structure", + "members":{ + "list":{ + "shape":"ConfiguredTableAssociationAnalysisRuleList", + "documentation":"

Analysis rule type that enables only list queries on a configured table.

" + }, + "aggregation":{ + "shape":"ConfiguredTableAssociationAnalysisRuleAggregation", + "documentation":"

Analysis rule type that enables only aggregation queries on a configured table.

" + }, + "custom":{ + "shape":"ConfiguredTableAssociationAnalysisRuleCustom", + "documentation":"

Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy.

" + } + }, + "documentation":"

Controls on the query specifications that can be run on an associated configured table.

", + "union":true + }, + "ConfiguredTableAssociationAnalysisRuleType":{ + "type":"string", + "enum":[ + "AGGREGATION", + "LIST", + "CUSTOM" + ] + }, + "ConfiguredTableAssociationAnalysisRuleTypeList":{ + "type":"list", + "member":{"shape":"ConfiguredTableAssociationAnalysisRuleType"} + }, "ConfiguredTableAssociationArn":{ "type":"string", "max":100, @@ -3189,7 +3859,7 @@ "members":{ "collaboration":{ "shape":"Collaboration", - "documentation":"

The entire created collaboration object.

" + "documentation":"

The collaboration.

" } } }, @@ -3260,7 +3930,7 @@ }, "analysisRulePolicy":{ "shape":"ConfiguredTableAnalysisRulePolicy", - "documentation":"

The entire created configured table analysis rule object.

" + "documentation":"

The analysis rule policy that was created for the configured table.

" } } }, @@ -3270,7 +3940,48 @@ "members":{ "analysisRule":{ "shape":"ConfiguredTableAnalysisRule", - "documentation":"

The entire created analysis rule.

" + "documentation":"

The analysis rule that was created for the configured table.

" + } + } + }, + "CreateConfiguredTableAssociationAnalysisRuleInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "configuredTableAssociationIdentifier", + "analysisRuleType", + "analysisRulePolicy" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "configuredTableAssociationIdentifier":{ + "shape":"ConfiguredTableAssociationIdentifier", + "documentation":"

The unique ID for the configured table association. Currently accepts the configured table association ID.

", + "location":"uri", + "locationName":"configuredTableAssociationIdentifier" + }, + "analysisRuleType":{ + "shape":"ConfiguredTableAssociationAnalysisRuleType", + "documentation":"

The type of analysis rule.

" + }, + "analysisRulePolicy":{ + "shape":"ConfiguredTableAssociationAnalysisRulePolicy", + "documentation":"

The analysis rule policy that was created for the configured table association.

" + } + } + }, + "CreateConfiguredTableAssociationAnalysisRuleOutput":{ + "type":"structure", + "required":["analysisRule"], + "members":{ + "analysisRule":{ + "shape":"ConfiguredTableAssociationAnalysisRule", + "documentation":"

The analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule.

" } } }, @@ -3317,7 +4028,7 @@ "members":{ "configuredTableAssociation":{ "shape":"ConfiguredTableAssociation", - "documentation":"

The entire configured table association object.

" + "documentation":"

The configured table association.

" } } }, @@ -3366,79 +4077,171 @@ } } }, - "CreateMembershipInput":{ + "CreateIdMappingTableInput":{ "type":"structure", "required":[ - "collaborationIdentifier", - "queryLogStatus" + "membershipIdentifier", + "name", + "inputReferenceConfig" ], "members":{ - "collaborationIdentifier":{ - "shape":"CollaborationIdentifier", - "documentation":"

The unique ID for the associated collaboration.

" + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping table.

", + "location":"uri", + "locationName":"membershipIdentifier" }, - "queryLogStatus":{ - "shape":"MembershipQueryLogStatus", - "documentation":"

An indicator as to whether query logging has been enabled or disabled for the membership.

" + "name":{ + "shape":"ResourceAlias", + "documentation":"

A name for the ID mapping table.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

A description of the ID mapping table.

" + }, + "inputReferenceConfig":{ + "shape":"IdMappingTableInputReferenceConfig", + "documentation":"

The input reference configuration needed to create the ID mapping table.

" }, "tags":{ "shape":"TagMap", "documentation":"

An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

" }, - "defaultResultConfiguration":{ - "shape":"MembershipProtectedQueryResultConfiguration", - "documentation":"

The default protected query result configuration as specified by the member who can receive results.

" - }, - "paymentConfiguration":{ - "shape":"MembershipPaymentConfiguration", - "documentation":"

The payment responsibilities accepted by the collaboration member.

Not required if the collaboration member has the member ability to run queries.

Required if the collaboration member doesn't have the member ability to run queries but is configured as a payer by the collaboration creator.

" + "kmsKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This value is used to encrypt the mapping table data that is stored by Clean Rooms.

" } } }, - "CreateMembershipOutput":{ + "CreateIdMappingTableOutput":{ "type":"structure", - "required":["membership"], + "required":["idMappingTable"], "members":{ - "membership":{ - "shape":"Membership", - "documentation":"

The membership that was created.

" + "idMappingTable":{ + "shape":"IdMappingTable", + "documentation":"

The ID mapping table that was created.

" } } }, - "CreatePrivacyBudgetTemplateInput":{ + "CreateIdNamespaceAssociationInput":{ "type":"structure", "required":[ "membershipIdentifier", - "autoRefresh", - "privacyBudgetType", - "parameters" + "inputReferenceConfig", + "name" ], "members":{ "membershipIdentifier":{ "shape":"MembershipIdentifier", - "documentation":"

A unique identifier for one of your memberships for a collaboration. The privacy budget template is created in the collaboration that this membership belongs to. Accepts a membership ID.

", + "documentation":"

The unique identifier of the membership that contains the ID namespace association.

", "location":"uri", "locationName":"membershipIdentifier" }, - "autoRefresh":{ - "shape":"PrivacyBudgetTemplateAutoRefresh", - "documentation":"

How often the privacy budget refreshes.

If you plan to regularly bring new data into the collaboration, you can use CALENDAR_MONTH to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.

" - }, - "privacyBudgetType":{ - "shape":"PrivacyBudgetType", - "documentation":"

Specifies the type of the privacy budget template.

" - }, - "parameters":{ - "shape":"PrivacyBudgetTemplateParametersInput", - "documentation":"

Specifies your parameters for the privacy budget template.

" + "inputReferenceConfig":{ + "shape":"IdNamespaceAssociationInputReferenceConfig", + "documentation":"

The input reference configuration needed to create the ID namespace association.

" }, "tags":{ "shape":"TagMap", "documentation":"

An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

The name for the ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the ID namespace association.

" + }, + "idMappingConfig":{ + "shape":"IdMappingConfig", + "documentation":"

The configuration settings for the ID mapping table.

" } } }, - "CreatePrivacyBudgetTemplateOutput":{ + "CreateIdNamespaceAssociationOutput":{ + "type":"structure", + "required":["idNamespaceAssociation"], + "members":{ + "idNamespaceAssociation":{ + "shape":"IdNamespaceAssociation", + "documentation":"

The ID namespace association that was created.

" + } + } + }, + "CreateMembershipInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "queryLogStatus" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

The unique ID for the associated collaboration.

" + }, + "queryLogStatus":{ + "shape":"MembershipQueryLogStatus", + "documentation":"

An indicator as to whether query logging has been enabled or disabled for the membership.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

" + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"

The default protected query result configuration as specified by the member who can receive results.

" + }, + "paymentConfiguration":{ + "shape":"MembershipPaymentConfiguration", + "documentation":"

The payment responsibilities accepted by the collaboration member.

Not required if the collaboration member has the member ability to run queries.

Required if the collaboration member doesn't have the member ability to run queries but is configured as a payer by the collaboration creator.

" + } + } + }, + "CreateMembershipOutput":{ + "type":"structure", + "required":["membership"], + "members":{ + "membership":{ + "shape":"Membership", + "documentation":"

The membership that was created.

" + } + } + }, + "CreatePrivacyBudgetTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "autoRefresh", + "privacyBudgetType", + "parameters" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

A unique identifier for one of your memberships for a collaboration. The privacy budget template is created in the collaboration that this membership belongs to. Accepts a membership ID.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "autoRefresh":{ + "shape":"PrivacyBudgetTemplateAutoRefresh", + "documentation":"

How often the privacy budget refreshes.

If you plan to regularly bring new data into the collaboration, you can use CALENDAR_MONTH to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.

" + }, + "privacyBudgetType":{ + "shape":"PrivacyBudgetType", + "documentation":"

Specifies the type of the privacy budget template.

" + }, + "parameters":{ + "shape":"PrivacyBudgetTemplateParametersInput", + "documentation":"

Specifies your parameters for the privacy budget template.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

" + } + } + }, + "CreatePrivacyBudgetTemplateOutput":{ "type":"structure", "required":["privacyBudgetTemplate"], "members":{ @@ -3572,6 +4375,39 @@ }, "documentation":"

An empty response that indicates a successful delete.

" }, + "DeleteConfiguredTableAssociationAnalysisRuleInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "configuredTableAssociationIdentifier", + "analysisRuleType" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "configuredTableAssociationIdentifier":{ + "shape":"ConfiguredTableAssociationIdentifier", + "documentation":"

The identifier for the configured table association that's related to the analysis rule that you want to delete.

", + "location":"uri", + "locationName":"configuredTableAssociationIdentifier" + }, + "analysisRuleType":{ + "shape":"ConfiguredTableAssociationAnalysisRuleType", + "documentation":"

The type of the analysis rule that you want to delete.

", + "location":"uri", + "locationName":"analysisRuleType" + } + } + }, + "DeleteConfiguredTableAssociationAnalysisRuleOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteConfiguredTableAssociationInput":{ "type":"structure", "required":[ @@ -3616,6 +4452,58 @@ }, "documentation":"

The empty output for a successful deletion.

" }, + "DeleteIdMappingTableInput":{ + "type":"structure", + "required":[ + "idMappingTableIdentifier", + "membershipIdentifier" + ], + "members":{ + "idMappingTableIdentifier":{ + "shape":"UUID", + "documentation":"

The unique identifier of the ID mapping table that you want to delete.

", + "location":"uri", + "locationName":"idMappingTableIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping table that you want to delete.

", + "location":"uri", + "locationName":"membershipIdentifier" + } + } + }, + "DeleteIdMappingTableOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteIdNamespaceAssociationInput":{ + "type":"structure", + "required":[ + "idNamespaceAssociationIdentifier", + "membershipIdentifier" + ], + "members":{ + "idNamespaceAssociationIdentifier":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the ID namespace association that you want to delete.

", + "location":"uri", + "locationName":"idNamespaceAssociationIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID namespace association that you want to delete.

", + "location":"uri", + "locationName":"membershipIdentifier" + } + } + }, + "DeleteIdNamespaceAssociationOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteMemberInput":{ "type":"structure", "required":[ @@ -3939,12 +4827,28 @@ }, "documentation":"

The epsilon and noise parameter values that you want to update in the differential privacy template.

" }, + "DirectAnalysisConfigurationDetails":{ + "type":"structure", + "members":{ + "receiverAccountIds":{ + "shape":"ReceiverAccountIds", + "documentation":"

The account IDs for the member who received the results of a protected query.

" + } + }, + "documentation":"

The direct analysis configuration details.

" + }, "DisplayName":{ "type":"string", "max":100, "min":1, "pattern":"(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*" }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, "Double":{ "type":"double", "box":true @@ -3966,6 +4870,12 @@ "type":"float", "box":true }, + "GenericResourceName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*" + }, "GetAnalysisTemplateInput":{ "type":"structure", "required":[ @@ -4059,6 +4969,37 @@ } } }, + "GetCollaborationIdNamespaceAssociationInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "idNamespaceAssociationIdentifier" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

The unique identifier of the collaboration that contains the ID namespace association that you want to retrieve.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "idNamespaceAssociationIdentifier":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the ID namespace association that you want to retrieve.

", + "location":"uri", + "locationName":"idNamespaceAssociationIdentifier" + } + } + }, + "GetCollaborationIdNamespaceAssociationOutput":{ + "type":"structure", + "required":["collaborationIdNamespaceAssociation"], + "members":{ + "collaborationIdNamespaceAssociation":{ + "shape":"CollaborationIdNamespaceAssociation", + "documentation":"

The ID namespace association that you requested.

" + } + } + }, "GetCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -4174,6 +5115,44 @@ } } }, + "GetConfiguredTableAssociationAnalysisRuleInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "configuredTableAssociationIdentifier", + "analysisRuleType" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "configuredTableAssociationIdentifier":{ + "shape":"ConfiguredTableAssociationIdentifier", + "documentation":"

The identifier for the configured table association that's related to the analysis rule.

", + "location":"uri", + "locationName":"configuredTableAssociationIdentifier" + }, + "analysisRuleType":{ + "shape":"ConfiguredTableAssociationAnalysisRuleType", + "documentation":"

The type of analysis rule that you want to retrieve.

", + "location":"uri", + "locationName":"analysisRuleType" + } + } + }, + "GetConfiguredTableAssociationAnalysisRuleOutput":{ + "type":"structure", + "required":["analysisRule"], + "members":{ + "analysisRule":{ + "shape":"ConfiguredTableAssociationAnalysisRule", + "documentation":"

The analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule.

" + } + } + }, "GetConfiguredTableAssociationInput":{ "type":"structure", "required":[ @@ -4227,6 +5206,68 @@ } } }, + "GetIdMappingTableInput":{ + "type":"structure", + "required":[ + "idMappingTableIdentifier", + "membershipIdentifier" + ], + "members":{ + "idMappingTableIdentifier":{ + "shape":"UUID", + "documentation":"

The unique identifier of the ID mapping table identifier that you want to retrieve.

", + "location":"uri", + "locationName":"idMappingTableIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping table that you want to retrieve.

", + "location":"uri", + "locationName":"membershipIdentifier" + } + } + }, + "GetIdMappingTableOutput":{ + "type":"structure", + "required":["idMappingTable"], + "members":{ + "idMappingTable":{ + "shape":"IdMappingTable", + "documentation":"

The ID mapping table that you requested.

" + } + } + }, + "GetIdNamespaceAssociationInput":{ + "type":"structure", + "required":[ + "idNamespaceAssociationIdentifier", + "membershipIdentifier" + ], + "members":{ + "idNamespaceAssociationIdentifier":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the ID namespace association that you want to retrieve.

", + "location":"uri", + "locationName":"idNamespaceAssociationIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID namespace association that you want to retrieve.

", + "location":"uri", + "locationName":"membershipIdentifier" + } + } + }, + "GetIdNamespaceAssociationOutput":{ + "type":"structure", + "required":["idNamespaceAssociation"], + "members":{ + "idNamespaceAssociation":{ + "shape":"IdNamespaceAssociation", + "documentation":"

The ID namespace association that you requested.

" + } + } + }, "GetMembershipInput":{ "type":"structure", "required":["membershipIdentifier"], @@ -4410,6 +5451,448 @@ }, "documentation":"

A reference to a table within an Glue data catalog.

" }, + "IdMappingConfig":{ + "type":"structure", + "required":["allowUseAsDimensionColumn"], + "members":{ + "allowUseAsDimensionColumn":{ + "shape":"Boolean", + "documentation":"

An indicator as to whether you can use your column as a dimension column in the ID mapping table (TRUE) or not (FALSE).

Default is FALSE.

" + } + }, + "documentation":"

The configuration settings for the ID mapping table.

" + }, + "IdMappingTable":{ + "type":"structure", + "required":[ + "id", + "arn", + "inputReferenceConfig", + "membershipId", + "membershipArn", + "collaborationId", + "collaborationArn", + "name", + "createTime", + "updateTime", + "inputReferenceProperties" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the ID mapping table.

" + }, + "arn":{ + "shape":"IdMappingTableArn", + "documentation":"

The Amazon Resource Name (ARN) of the ID mapping table.

" + }, + "inputReferenceConfig":{ + "shape":"IdMappingTableInputReferenceConfig", + "documentation":"

The input reference configuration for the ID mapping table.

" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the membership resource for the ID mapping table.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the membership resource for the ID mapping table.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains this ID mapping table.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the ID mapping table.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of the ID mapping table.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the ID mapping table was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which the ID mapping table was updated.

" + }, + "inputReferenceProperties":{ + "shape":"IdMappingTableInputReferenceProperties", + "documentation":"

The input reference properties for the ID mapping table.

" + }, + "kmsKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key.

" + } + }, + "documentation":"

Describes information about the ID mapping table.

" + }, + "IdMappingTableArn":{ + "type":"string", + "max":200, + "min":0, + "pattern":"arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/idmappingtable/[\\d\\w-]+" + }, + "IdMappingTableInputReferenceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(idmappingworkflow/.*)" + }, + "IdMappingTableInputReferenceConfig":{ + "type":"structure", + "required":[ + "inputReferenceArn", + "manageResourcePolicies" + ], + "members":{ + "inputReferenceArn":{ + "shape":"IdMappingTableInputReferenceArn", + "documentation":"

The Amazon Resource Name (ARN) of the referenced resource in Entity Resolution. Valid values are ID mapping workflow ARNs.

" + }, + "manageResourcePolicies":{ + "shape":"Boolean", + "documentation":"

When TRUE, Clean Rooms manages permissions for the ID mapping table resource.

When FALSE, the resource owner manages permissions for the ID mapping table resource.

" + } + }, + "documentation":"

Provides the input reference configuration for the ID mapping table.

" + }, + "IdMappingTableInputReferenceProperties":{ + "type":"structure", + "required":["idMappingTableInputSource"], + "members":{ + "idMappingTableInputSource":{ + "shape":"IdMappingTableInputSourceList", + "documentation":"

The input source of the ID mapping table.

" + } + }, + "documentation":"

The input reference properties for the ID mapping table.

" + }, + "IdMappingTableInputSource":{ + "type":"structure", + "required":[ + "idNamespaceAssociationId", + "type" + ], + "members":{ + "idNamespaceAssociationId":{ + "shape":"String", + "documentation":"

The unique identifier of the ID namespace association.

" + }, + "type":{ + "shape":"IdNamespaceType", + "documentation":"

The type of the input source of the ID mapping table.

" + } + }, + "documentation":"

The input source of the ID mapping table.

" + }, + "IdMappingTableInputSourceList":{ + "type":"list", + "member":{"shape":"IdMappingTableInputSource"}, + "max":2, + "min":2 + }, + "IdMappingTableSchemaTypeProperties":{ + "type":"structure", + "required":["idMappingTableInputSource"], + "members":{ + "idMappingTableInputSource":{ + "shape":"IdMappingTableInputSourceList", + "documentation":"

Defines which ID namespace associations are used to create the ID mapping table.

" + } + }, + "documentation":"

Additional properties that are specific to the type of the associated schema.

" + }, + "IdMappingTableSummary":{ + "type":"structure", + "required":[ + "collaborationArn", + "collaborationId", + "membershipId", + "membershipArn", + "createTime", + "updateTime", + "id", + "arn", + "inputReferenceConfig", + "name" + ], + "members":{ + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains this ID mapping table.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains this ID mapping table.

" + }, + "membershipId":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership resource for this ID mapping table.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the membership resource for this ID mapping table.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which this ID mapping table was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which this ID mapping table was updated.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of this ID mapping table.

" + }, + "arn":{ + "shape":"IdMappingTableArn", + "documentation":"

The Amazon Resource Name (ARN) of this ID mapping table.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of this ID mapping table.

" + }, + "inputReferenceConfig":{ + "shape":"IdMappingTableInputReferenceConfig", + "documentation":"

The input reference configuration for the ID mapping table.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of this ID mapping table.

" + } + }, + "documentation":"

Detailed information about the ID mapping table.

" + }, + "IdMappingTableSummaryList":{ + "type":"list", + "member":{"shape":"IdMappingTableSummary"} + }, + "IdMappingWorkflowsSupported":{ + "type":"list", + "member":{"shape":"Document"} + }, + "IdNamespaceAssociation":{ + "type":"structure", + "required":[ + "id", + "arn", + "membershipId", + "membershipArn", + "collaborationId", + "collaborationArn", + "name", + "createTime", + "updateTime", + "inputReferenceConfig", + "inputReferenceProperties" + ], + "members":{ + "id":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier for this ID namespace association.

" + }, + "arn":{ + "shape":"IdNamespaceAssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the ID namespace association.

" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the membership resource for this ID namespace association.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the membership resource for this ID namespace association.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains this ID namespace association.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association.

" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

The name of this ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the ID namespace association.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the ID namespace association was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which the ID namespace association was updated.

" + }, + "inputReferenceConfig":{ + "shape":"IdNamespaceAssociationInputReferenceConfig", + "documentation":"

The input reference configuration for the ID namespace association.

" + }, + "inputReferenceProperties":{ + "shape":"IdNamespaceAssociationInputReferenceProperties", + "documentation":"

The input reference properties for the ID namespace association.

" + }, + "idMappingConfig":{ + "shape":"IdMappingConfig", + "documentation":"

The configuration settings for the ID mapping table.

" + } + }, + "documentation":"

Provides information to create the ID namespace association.

" + }, + "IdNamespaceAssociationArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/idnamespaceassociation/[\\d\\w-]+" + }, + "IdNamespaceAssociationIdentifier":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "IdNamespaceAssociationInputReferenceArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws:entityresolution:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:idnamespace/[\\d\\w-]+" + }, + "IdNamespaceAssociationInputReferenceConfig":{ + "type":"structure", + "required":[ + "inputReferenceArn", + "manageResourcePolicies" + ], + "members":{ + "inputReferenceArn":{ + "shape":"IdNamespaceAssociationInputReferenceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own.

" + }, + "manageResourcePolicies":{ + "shape":"Boolean", + "documentation":"

When TRUE, Clean Rooms manages permissions for the ID namespace association resource.

When FALSE, the resource owner manages permissions for the ID namespace association resource.

" + } + }, + "documentation":"

Provides the information for the ID namespace association input reference configuration.

" + }, + "IdNamespaceAssociationInputReferenceProperties":{ + "type":"structure", + "required":[ + "idNamespaceType", + "idMappingWorkflowsSupported" + ], + "members":{ + "idNamespaceType":{ + "shape":"IdNamespaceType", + "documentation":"

The ID namespace type for this ID namespace association.

" + }, + "idMappingWorkflowsSupported":{ + "shape":"IdMappingWorkflowsSupported", + "documentation":"

Defines how ID mapping workflows are supported for this ID namespace association.

" + } + }, + "documentation":"

Provides the information for the ID namespace association input reference properties.

" + }, + "IdNamespaceAssociationInputReferencePropertiesSummary":{ + "type":"structure", + "required":["idNamespaceType"], + "members":{ + "idNamespaceType":{ + "shape":"IdNamespaceType", + "documentation":"

The ID namespace type for this ID namespace association.

" + } + }, + "documentation":"

Detailed information about the ID namespace association input reference properties.

" + }, + "IdNamespaceAssociationSummary":{ + "type":"structure", + "required":[ + "membershipId", + "membershipArn", + "collaborationArn", + "collaborationId", + "createTime", + "updateTime", + "id", + "arn", + "inputReferenceConfig", + "name", + "inputReferenceProperties" + ], + "members":{ + "membershipId":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership resource for this ID namespace association.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the membership resource for this ID namespace association.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The Amazon Resource Name (ARN) of the collaboration that contains this ID namespace association.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the collaboration that contains this ID namespace association.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time at which this ID namespace association was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The most recent time at which this ID namespace association has been updated.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of this ID namespace association.

" + }, + "arn":{ + "shape":"IdNamespaceAssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of this ID namespace association.

" + }, + "inputReferenceConfig":{ + "shape":"IdNamespaceAssociationInputReferenceConfig", + "documentation":"

The input reference configuration details for this ID namespace association.

" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

The name of the ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the ID namespace association.

" + }, + "inputReferenceProperties":{ + "shape":"IdNamespaceAssociationInputReferencePropertiesSummary", + "documentation":"

The input reference properties for this ID namespace association.

" + } + }, + "documentation":"

Detailed information about the ID namespace association.

" + }, + "IdNamespaceAssociationSummaryList":{ + "type":"list", + "member":{"shape":"IdNamespaceAssociationSummary"} + }, + "IdNamespaceType":{ + "type":"string", + "enum":[ + "SOURCE", + "TARGET" + ] + }, "InternalServerException":{ "type":"structure", "members":{ @@ -4437,6 +5920,12 @@ "type":"string", "enum":["QUERY_RUNNER"] }, + "KMSKeyArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:kms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:key/[a-zA-Z0-9-]+" + }, "KeyPrefix":{ "type":"string", "max":512, @@ -4557,6 +6046,44 @@ } } }, + "ListCollaborationIdNamespaceAssociationsInput":{ + "type":"structure", + "required":["collaborationIdentifier"], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

The unique identifier of the collaboration that contains the ID namespace associations that you want to retrieve.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListCollaborationIdNamespaceAssociationsOutput":{ + "type":"structure", + "required":["collaborationIdNamespaceAssociationSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value provided to access the next page of results.

" + }, + "collaborationIdNamespaceAssociationSummaries":{ + "shape":"CollaborationIdNamespaceAssociationSummaryList", + "documentation":"

The summary information of the collaboration ID namespace associations that you requested.

" + } + } + }, "ListCollaborationPrivacyBudgetTemplatesInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -4786,6 +6313,82 @@ } } }, + "ListIdMappingTablesInput":{ + "type":"structure", + "required":["membershipIdentifier"], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping tables that you want to view.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListIdMappingTablesOutput":{ + "type":"structure", + "required":["idMappingTableSummaries"], + "members":{ + "idMappingTableSummaries":{ + "shape":"IdMappingTableSummaryList", + "documentation":"

The summary information of the ID mapping tables that you requested.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value provided to access the next page of results.

" + } + } + }, + "ListIdNamespaceAssociationsInput":{ + "type":"structure", + "required":["membershipIdentifier"], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID namespace association that you want to view.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListIdNamespaceAssociationsOutput":{ + "type":"structure", + "required":["idNamespaceAssociationSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value provided to access the next page of results.

" + }, + "idNamespaceAssociationSummaries":{ + "shape":"IdNamespaceAssociationSummaryList", + "documentation":"

The summary information of the ID namespace associations that you requested.

" + } + } + }, "ListMembersInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -5447,6 +7050,37 @@ }, "documentation":"

An object representing the collaboration member's payment responsibilities set by the collaboration creator.

" }, + "PopulateIdMappingTableInput":{ + "type":"structure", + "required":[ + "idMappingTableIdentifier", + "membershipIdentifier" + ], + "members":{ + "idMappingTableIdentifier":{ + "shape":"UUID", + "documentation":"

The unique identifier of the ID mapping table that you want to populate.

", + "location":"uri", + "locationName":"idMappingTableIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping table that you want to populate.

", + "location":"uri", + "locationName":"membershipIdentifier" + } + } + }, + "PopulateIdMappingTableOutput":{ + "type":"structure", + "required":["idMappingJobId"], + "members":{ + "idMappingJobId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the mapping job that will populate the ID mapping table.

" + } + } + }, "PreviewPrivacyImpactInput":{ "type":"structure", "required":[ @@ -5623,7 +7257,7 @@ }, "parameters":{ "shape":"PrivacyBudgetTemplateParametersOutput", - "documentation":"

Specifies the epislon and noise parameters for the privacy budget template.

" + "documentation":"

Specifies the epsilon and noise parameters for the privacy budget template.

" } }, "documentation":"

An object that defines the privacy budget template.

" @@ -5833,6 +7467,17 @@ "min":36, "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" }, + "ProtectedQueryMemberOutputConfiguration":{ + "type":"structure", + "required":["accountId"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The unique identifier for the account.

" + } + }, + "documentation":"

Contains configuration details for the protected query member output.

" + }, "ProtectedQueryMemberOutputList":{ "type":"list", "member":{"shape":"ProtectedQuerySingleMemberOutput"} @@ -5857,7 +7502,11 @@ "members":{ "s3":{ "shape":"ProtectedQueryS3OutputConfiguration", - "documentation":"

Required configuration for a protected query with an `S3` output type.

" + "documentation":"

Required configuration for a protected query with an s3 output type.

" + }, + "member":{ + "shape":"ProtectedQueryMemberOutputConfiguration", + "documentation":"

Required configuration for a protected query with a member output type.

" } }, "documentation":"

Contains configuration details for protected query output.

", @@ -5945,7 +7594,7 @@ }, "ProtectedQuerySQLParametersQueryStringString":{ "type":"string", - "max":90000, + "max":500000, "min":0 }, "ProtectedQuerySingleMemberOutput":{ @@ -5964,7 +7613,7 @@ "members":{ "totalDurationInMillis":{ "shape":"Long", - "documentation":"

The duration of the Protected Query, from creation until query completion.

" + "documentation":"

The duration of the protected query, from creation until query completion.

" } }, "documentation":"

Contains statistics about the execution of the protected query.

" @@ -5988,7 +7637,8 @@ "membershipId", "membershipArn", "createTime", - "status" + "status", + "receiverConfigurations" ], "members":{ "id":{ @@ -6010,6 +7660,10 @@ "status":{ "shape":"ProtectedQueryStatus", "documentation":"

The status of the protected query. Value values are `SUBMITTED`, `STARTED`, `CANCELLED`, `CANCELLING`, `FAILED`, `SUCCESS`, `TIMED_OUT`.

" + }, + "receiverConfigurations":{ + "shape":"ReceiverConfigurationsList", + "documentation":"

The receiver configuration.

" } }, "documentation":"

The protected query summary for the objects listed by the request.

" @@ -6033,10 +7687,60 @@ }, "documentation":"

An object representing the collaboration member's payment responsibilities set by the collaboration creator for query compute costs.

" }, + "QueryConstraint":{ + "type":"structure", + "members":{ + "requireOverlap":{ + "shape":"QueryConstraintRequireOverlap", + "documentation":"

An array of column names that specifies which columns are required in the JOIN statement.

" + } + }, + "documentation":"

Provides any necessary query constraint information.

", + "union":true + }, + "QueryConstraintList":{ + "type":"list", + "member":{"shape":"QueryConstraint"}, + "max":1, + "min":0 + }, + "QueryConstraintRequireOverlap":{ + "type":"structure", + "members":{ + "columns":{ + "shape":"AnalysisRuleColumnList", + "documentation":"

The columns that are required to overlap.

" + } + }, + "documentation":"

Provides the name of the columns that are required to overlap.

" + }, "QueryTables":{ "type":"list", "member":{"shape":"TableAlias"} }, + "ReceiverAccountIds":{ + "type":"list", + "member":{"shape":"AccountId"} + }, + "ReceiverConfiguration":{ + "type":"structure", + "required":["analysisType"], + "members":{ + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

The type of analysis for the protected query. The results of the query can be analyzed directly (DIRECT_ANALYSIS) or used as input into additional analyses (ADDITIONAL_ANALYSIS), such as a query that is a seed for a lookalike ML model.

" + }, + "configurationDetails":{ + "shape":"ConfigurationDetails", + "documentation":"

The configuration details of the receiver configuration.

" + } + }, + "documentation":"

The receiver configuration for a protected query.

" + }, + "ReceiverConfigurationsList":{ + "type":"list", + "member":{"shape":"ReceiverConfiguration"} + }, "ResourceAlias":{ "type":"string", "max":128, @@ -6197,6 +7901,10 @@ "schemaStatusDetails":{ "shape":"SchemaStatusDetailList", "documentation":"

Details about the status of the schema. Currently, only one entry is present.

" + }, + "schemaTypeProperties":{ + "shape":"SchemaTypeProperties", + "documentation":"

The schema type properties.

" } }, "documentation":"

A schema is a relation within a collaboration.

" @@ -6254,11 +7962,14 @@ }, "SchemaStatusDetail":{ "type":"structure", - "required":["status"], + "required":[ + "status", + "analysisType" + ], "members":{ "status":{ "shape":"SchemaStatus", - "documentation":"

The status of the schema.

" + "documentation":"

The status of the schema, indicating if it is ready to query.

" }, "reasons":{ "shape":"SchemaStatusReasonList", @@ -6271,6 +7982,10 @@ "configurations":{ "shape":"SchemaConfigurationList", "documentation":"

The configuration details of the schema analysis rule for the given type.

" + }, + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

The type of analysis that can be performed on the schema.

A schema can have an analysisType of DIRECT_ANALYSIS, ADDITIONAL_ANALYSIS_FOR_AUDIENCE_GENERATION, or both.

" } }, "documentation":"

Information about the schema status.

A status of READY means that based on the schema analysis rule, queries of the given analysis rule type are properly configured to run queries on this schema.

" @@ -6303,7 +8018,14 @@ "ANALYSIS_RULE_MISSING", "ANALYSIS_TEMPLATES_NOT_CONFIGURED", "ANALYSIS_PROVIDERS_NOT_CONFIGURED", - "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED" + "DIFFERENTIAL_PRIVACY_POLICY_NOT_CONFIGURED", + "ID_MAPPING_TABLE_NOT_POPULATED", + "COLLABORATION_ANALYSIS_RULE_NOT_CONFIGURED", + "ADDITIONAL_ANALYSES_NOT_CONFIGURED", + "RESULT_RECEIVERS_NOT_CONFIGURED", + "ADDITIONAL_ANALYSES_NOT_ALLOWED", + "RESULT_RECEIVERS_NOT_ALLOWED", + "ANALYSIS_RULE_TYPES_NOT_COMPATIBLE" ] }, "SchemaStatusReasonList":{ @@ -6368,7 +8090,21 @@ }, "SchemaType":{ "type":"string", - "enum":["TABLE"] + "enum":[ + "TABLE", + "ID_MAPPING_TABLE" + ] + }, + "SchemaTypeProperties":{ + "type":"structure", + "members":{ + "idMappingTable":{ + "shape":"IdMappingTableSchemaTypeProperties", + "documentation":"

The ID mapping table for the schema type properties.

" + } + }, + "documentation":"

Information about the schema type properties.

", + "union":true }, "ServiceQuotaExceededException":{ "type":"structure", @@ -6698,6 +8434,49 @@ } } }, + "UpdateConfiguredTableAssociationAnalysisRuleInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "configuredTableAssociationIdentifier", + "analysisRuleType", + "analysisRulePolicy" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

A unique identifier for the membership that the configured table association belongs to. Currently accepts the membership ID.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "configuredTableAssociationIdentifier":{ + "shape":"ConfiguredTableAssociationIdentifier", + "documentation":"

The identifier for the configured table association to update.

", + "location":"uri", + "locationName":"configuredTableAssociationIdentifier" + }, + "analysisRuleType":{ + "shape":"ConfiguredTableAssociationAnalysisRuleType", + "documentation":"

The analysis rule type that you want to update.

", + "location":"uri", + "locationName":"analysisRuleType" + }, + "analysisRulePolicy":{ + "shape":"ConfiguredTableAssociationAnalysisRulePolicy", + "documentation":"

The updated analysis rule policy for the configured table association.

" + } + } + }, + "UpdateConfiguredTableAssociationAnalysisRuleOutput":{ + "type":"structure", + "required":["analysisRule"], + "members":{ + "analysisRule":{ + "shape":"ConfiguredTableAssociationAnalysisRule", + "documentation":"

The updated analysis rule for the configured table association. In the console, the ConfiguredTableAssociationAnalysisRule is referred to as the collaboration analysis rule.

" + } + } + }, "UpdateConfiguredTableAssociationInput":{ "type":"structure", "required":[ @@ -6767,6 +8546,88 @@ } } }, + "UpdateIdMappingTableInput":{ + "type":"structure", + "required":[ + "idMappingTableIdentifier", + "membershipIdentifier" + ], + "members":{ + "idMappingTableIdentifier":{ + "shape":"UUID", + "documentation":"

The unique identifier of the ID mapping table that you want to update.

", + "location":"uri", + "locationName":"idMappingTableIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID mapping table that you want to update.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

A new description for the ID mapping table.

" + }, + "kmsKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services KMS key.

" + } + } + }, + "UpdateIdMappingTableOutput":{ + "type":"structure", + "required":["idMappingTable"], + "members":{ + "idMappingTable":{ + "shape":"IdMappingTable", + "documentation":"

The updated ID mapping table.

" + } + } + }, + "UpdateIdNamespaceAssociationInput":{ + "type":"structure", + "required":[ + "idNamespaceAssociationIdentifier", + "membershipIdentifier" + ], + "members":{ + "idNamespaceAssociationIdentifier":{ + "shape":"IdNamespaceAssociationIdentifier", + "documentation":"

The unique identifier of the ID namespace association that you want to update.

", + "location":"uri", + "locationName":"idNamespaceAssociationIdentifier" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The unique identifier of the membership that contains the ID namespace association that you want to update.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "name":{ + "shape":"GenericResourceName", + "documentation":"

A new name for the ID namespace association.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

A new description for the ID namespace association.

" + }, + "idMappingConfig":{ + "shape":"IdMappingConfig", + "documentation":"

The configuration settings for the ID mapping table.

" + } + } + }, + "UpdateIdNamespaceAssociationOutput":{ + "type":"structure", + "required":["idNamespaceAssociation"], + "members":{ + "idNamespaceAssociation":{ + "shape":"IdNamespaceAssociation", + "documentation":"

The updated ID namespace association.

" + } + } + }, "UpdateMembershipInput":{ "type":"structure", "required":["membershipIdentifier"], diff --git a/botocore/data/cleanroomsml/2023-09-06/service-2.json b/botocore/data/cleanroomsml/2023-09-06/service-2.json index 65ffe86aeb..7a9e69e30b 100644 --- a/botocore/data/cleanroomsml/2023-09-06/service-2.json +++ b/botocore/data/cleanroomsml/2023-09-06/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-09-06", + "auth":["aws.auth#sigv4"], "endpointPrefix":"cleanrooms-ml", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Clean Rooms ML", "serviceId":"CleanRoomsML", "signatureVersion":"v4", @@ -446,7 +447,13 @@ "type":"string", "max":12, "min":12, - "pattern":"^[0-9]{12}$" + "pattern":"[0-9]{12}" + }, + "AnalysisTemplateArn":{ + "type":"string", + "max":200, + "min":0, + "pattern":"arn:aws[-a-z]*:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+" }, "AudienceDestination":{ "type":"structure", @@ -475,43 +482,43 @@ "AudienceExportJobSummary":{ "type":"structure", "required":[ - "audienceGenerationJobArn", - "audienceSize", "createTime", + "updateTime", "name", - "status", - "updateTime" + "audienceGenerationJobArn", + "audienceSize", + "status" ], "members":{ - "audienceGenerationJobArn":{ - "shape":"AudienceGenerationJobArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience generation job that was exported.

" - }, - "audienceSize":{"shape":"AudienceSize"}, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the audience export job was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience export job.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the audience export job was updated.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the audience export job.

" }, - "outputLocation":{ - "shape":"S3Path", - "documentation":"

The Amazon S3 bucket where the audience export is stored.

" + "audienceGenerationJobArn":{ + "shape":"AudienceGenerationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience generation job that was exported.

" + }, + "audienceSize":{"shape":"AudienceSize"}, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience export job.

" }, "status":{ "shape":"AudienceExportJobStatus", "documentation":"

The status of the audience export job.

" }, "statusDetails":{"shape":"StatusDetails"}, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the audience export job was updated.

" + "outputLocation":{ + "shape":"S3Path", + "documentation":"

The Amazon S3 bucket where the audience export is stored.

" } }, "documentation":"

Provides information about the audience export job.

" @@ -520,14 +527,11 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:audience-generation-job/[-a-zA-Z0-9_/.]+$" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:audience-generation-job/[-a-zA-Z0-9_/.]+" }, "AudienceGenerationJobDataSource":{ "type":"structure", - "required":[ - "dataSource", - "roleArn" - ], + "required":["roleArn"], "members":{ "dataSource":{ "shape":"S3ConfigMap", @@ -535,7 +539,11 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of the IAM role that can read the Amazon S3 bucket where the training data is stored.

" + "documentation":"

The ARN of the IAM role that can read the Amazon S3 bucket where the seed audience is stored.

" + }, + "sqlParameters":{ + "shape":"ProtectedQuerySQLParameters", + "documentation":"

The protected SQL query parameters.

" } }, "documentation":"

Defines the Amazon S3 bucket where the seed audience for the generating audience is stored.

" @@ -559,49 +567,49 @@ "AudienceGenerationJobSummary":{ "type":"structure", "required":[ - "audienceGenerationJobArn", - "configuredAudienceModelArn", "createTime", + "updateTime", + "audienceGenerationJobArn", "name", "status", - "updateTime" + "configuredAudienceModelArn" ], "members":{ - "audienceGenerationJobArn":{ - "shape":"AudienceGenerationJobArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience generation job.

" - }, - "collaborationId":{ - "shape":"UUID", - "documentation":"

The identifier of the collaboration that contains this audience generation job.

" - }, - "configuredAudienceModelArn":{ - "shape":"ConfiguredAudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that was used for this audience generation job.

" - }, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the audience generation job was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience generation job.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the audience generation job was updated.

" + }, + "audienceGenerationJobArn":{ + "shape":"AudienceGenerationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience generation job.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the audience generation job.

" }, - "startedBy":{ - "shape":"AccountId", - "documentation":"

The AWS Account that submitted the job.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience generation job.

" }, "status":{ "shape":"AudienceGenerationJobStatus", "documentation":"

The status of the audience generation job.

" }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the audience generation job was updated.

" + "configuredAudienceModelArn":{ + "shape":"ConfiguredAudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that was used for this audience generation job.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The identifier of the collaboration that contains this audience generation job.

" + }, + "startedBy":{ + "shape":"AccountId", + "documentation":"

The AWS Account that submitted the job.

" } }, "documentation":"

Provides information about the configured audience generation job.

" @@ -610,7 +618,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:audience-model/[-a-zA-Z0-9_/.]+$" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:audience-model/[-a-zA-Z0-9_/.]+" }, "AudienceModelList":{ "type":"list", @@ -631,41 +639,41 @@ "AudienceModelSummary":{ "type":"structure", "required":[ - "audienceModelArn", "createTime", + "updateTime", + "audienceModelArn", "name", - "status", "trainingDatasetArn", - "updateTime" + "status" ], "members":{ - "audienceModelArn":{ - "shape":"AudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience model.

" - }, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the audience model was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience model.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the audience model was updated.

" + }, + "audienceModelArn":{ + "shape":"AudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience model.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the audience model.

" }, - "status":{ - "shape":"AudienceModelStatus", - "documentation":"

The status of the audience model.

" - }, "trainingDatasetArn":{ "shape":"TrainingDatasetArn", "documentation":"

The Amazon Resource Name (ARN) of the training dataset that was used for the audience model.

" }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the audience model was updated.

" + "status":{ + "shape":"AudienceModelStatus", + "documentation":"

The status of the audience model.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience model.

" } }, "documentation":"

Information about the audience model.

" @@ -674,13 +682,13 @@ "type":"structure", "required":["relevanceMetrics"], "members":{ - "recallMetric":{ - "shape":"Double", - "documentation":"

The recall score of the generated audience. Recall is the percentage of the most similar users (by default, the most similar 20%) from a sample of the training data that are included in the seed audience by the audience generation job. Values range from 0-1, larger values indicate a better audience. A recall value approximately equal to the maximum bin size indicates that the audience model is equivalent to random selection.

" - }, "relevanceMetrics":{ "shape":"RelevanceMetrics", "documentation":"

The relevance scores of the generated audience.

" + }, + "recallMetric":{ + "shape":"Double", + "documentation":"

The recall score of the generated audience. Recall is the percentage of the most similar users (by default, the most similar 20%) from a sample of the training data that are included in the seed audience by the audience generation job. Values range from 0-1, larger values indicate a better audience. A recall value approximately equal to the maximum bin size indicates that the audience model is equivalent to random selection.

" } }, "documentation":"

Metrics that describe the quality of the generated audience.

" @@ -712,20 +720,20 @@ "AudienceSizeConfig":{ "type":"structure", "required":[ - "audienceSizeBins", - "audienceSizeType" + "audienceSizeType", + "audienceSizeBins" ], "members":{ - "audienceSizeBins":{ - "shape":"AudienceSizeBins", - "documentation":"

An array of the different audience output sizes.

" - }, "audienceSizeType":{ "shape":"AudienceSizeType", "documentation":"

Whether the audience output sizes are defined as an absolute number or a percentage.

" + }, + "audienceSizeBins":{ + "shape":"AudienceSizeBins", + "documentation":"

An array of the different audience output sizes.

" } }, - "documentation":"

Configure the list of audience output sizes that can be created. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" + "documentation":"

Returns the relevance scores at these audience sizes when used in the GetAudienceGenerationJob for a specified audience generation job and configured audience model.

Specifies the list of allowed audienceSize values when used in the StartAudienceExportJob for an audience generation job. You can use the ABSOLUTE AudienceSize to configure output audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" }, "AudienceSizeType":{ "type":"string", @@ -748,7 +756,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?$" + "pattern":"[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?" }, "ColumnSchema":{ "type":"structure", @@ -788,7 +796,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:configured-audience-model/[-a-zA-Z0-9_/.]+$" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:configured-audience-model/[-a-zA-Z0-9_/.]+" }, "ConfiguredAudienceModelList":{ "type":"list", @@ -816,46 +824,46 @@ "ConfiguredAudienceModelSummary":{ "type":"structure", "required":[ - "audienceModelArn", - "configuredAudienceModelArn", "createTime", + "updateTime", "name", + "audienceModelArn", "outputConfig", - "status", - "updateTime" + "configuredAudienceModelArn", + "status" ], "members":{ - "audienceModelArn":{ - "shape":"AudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience model that was used to create the configured audience model.

" - }, - "configuredAudienceModelArn":{ - "shape":"ConfiguredAudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that you are interested in.

" - }, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the configured audience model was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the configured audience model.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the configured audience model was updated.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the configured audience model.

" }, + "audienceModelArn":{ + "shape":"AudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience model that was used to create the configured audience model.

" + }, "outputConfig":{ "shape":"ConfiguredAudienceModelOutputConfig", "documentation":"

The output configuration of the configured audience model.

" }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the configured audience model.

" + }, + "configuredAudienceModelArn":{ + "shape":"ConfiguredAudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that you are interested in.

" + }, "status":{ "shape":"ConfiguredAudienceModelStatus", "documentation":"

The status of the configured audience model.

" - }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the configured audience model was updated.

" } }, "documentation":"

Information about the configured audience model.

" @@ -880,33 +888,33 @@ "trainingDatasetArn" ], "members":{ - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience model.

" + "trainingDataStartTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The start date and time of the training window.

" }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the trained ML model and the associated data.

" + "trainingDataEndTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The end date and time of the training window.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the audience model resource.

" }, + "trainingDatasetArn":{ + "shape":"TrainingDatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the training dataset for this audience model.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the trained ML model and the associated data.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" }, - "trainingDataEndTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The end date and time of the training window.

" - }, - "trainingDataStartTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The start date and time of the training window.

" - }, - "trainingDatasetArn":{ - "shape":"TrainingDatasetArn", - "documentation":"

The Amazon Resource Name (ARN) of the training dataset for this audience model.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience model.

" } } }, @@ -923,47 +931,47 @@ "CreateConfiguredAudienceModelRequest":{ "type":"structure", "required":[ - "audienceModelArn", "name", + "audienceModelArn", "outputConfig", "sharedAudienceMetrics" ], "members":{ + "name":{ + "shape":"NameString", + "documentation":"

The name of the configured audience model.

" + }, "audienceModelArn":{ "shape":"AudienceModelArn", "documentation":"

The Amazon Resource Name (ARN) of the audience model to use for the configured audience model.

" }, - "audienceSizeConfig":{ - "shape":"AudienceSizeConfig", - "documentation":"

Configure the list of output sizes of audiences that can be created using this configured audience model. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" - }, - "childResourceTagOnCreatePolicy":{ - "shape":"TagOnCreatePolicy", - "documentation":"

Configure how the service tags audience generation jobs created using this configured audience model. If you specify NONE, the tags from the StartAudienceGenerationJob request determine the tags of the audience generation job. If you specify FROM_PARENT_RESOURCE, the audience generation job inherits the tags from the configured audience model, by default. Tags in the StartAudienceGenerationJob will override the default.

When the client is in a different account than the configured audience model, the tags from the client are never applied to a resource in the caller's account.

" + "outputConfig":{ + "shape":"ConfiguredAudienceModelOutputConfig", + "documentation":"

Configure the Amazon S3 location and IAM Role for audiences created using this configured audience model. Each audience will have a unique location. The IAM Role must have s3:PutObject permission on the destination Amazon S3 location. If the destination is protected with Amazon S3 KMS-SSE, then the Role must also have the required KMS permissions.

" }, "description":{ "shape":"ResourceDescription", "documentation":"

The description of the configured audience model.

" }, + "sharedAudienceMetrics":{ + "shape":"MetricsList", + "documentation":"

Whether audience metrics are shared.

" + }, "minMatchingSeedSize":{ "shape":"MinMatchingSeedSize", "documentation":"

The minimum number of users from the seed audience that must match with users in the training data of the audience model. The default value is 500.

" }, - "name":{ - "shape":"NameString", - "documentation":"

The name of the configured audience model.

" - }, - "outputConfig":{ - "shape":"ConfiguredAudienceModelOutputConfig", - "documentation":"

Configure the Amazon S3 location and IAM Role for audiences created using this configured audience model. Each audience will have a unique location. The IAM Role must have s3:PutObject permission on the destination Amazon S3 location. If the destination is protected with Amazon S3 KMS-SSE, then the Role must also have the required KMS permissions.

" - }, - "sharedAudienceMetrics":{ - "shape":"MetricsList", - "documentation":"

Whether audience metrics are shared.

" + "audienceSizeConfig":{ + "shape":"AudienceSizeConfig", + "documentation":"

Configure the list of output sizes of audiences that can be created using this configured audience model. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" }, "tags":{ "shape":"TagMap", "documentation":"

The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" + }, + "childResourceTagOnCreatePolicy":{ + "shape":"TagOnCreatePolicy", + "documentation":"

Configure how the service tags audience generation jobs created using this configured audience model. If you specify NONE, the tags from the StartAudienceGenerationJob request determine the tags of the audience generation job. If you specify FROM_PARENT_RESOURCE, the audience generation job inherits the tags from the configured audience model, by default. Tags in the StartAudienceGenerationJob will override the default.

When the client is in a different account than the configured audience model, the tags from the client are never applied to a resource in the caller's account.

" } } }, @@ -985,10 +993,6 @@ "trainingData" ], "members":{ - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the training dataset.

" - }, "name":{ "shape":"NameString", "documentation":"

The name of the training dataset. This name must be unique in your account and region.

" @@ -997,13 +1001,17 @@ "shape":"IamRoleArn", "documentation":"

The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the dataSource field of each dataset.

Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error.

" }, + "trainingData":{ + "shape":"CreateTrainingDatasetRequestTrainingDataList", + "documentation":"

An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" }, - "trainingData":{ - "shape":"CreateTrainingDatasetRequestTrainingDataList", - "documentation":"

An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the training dataset.

" } } }, @@ -1037,17 +1045,17 @@ "Dataset":{ "type":"structure", "required":[ - "inputConfig", - "type" + "type", + "inputConfig" ], "members":{ - "inputConfig":{ - "shape":"DatasetInputConfig", - "documentation":"

A DatasetInputConfig object that defines the data source and schema mapping.

" - }, "type":{ "shape":"DatasetType", "documentation":"

What type of information is found in the dataset.

" + }, + "inputConfig":{ + "shape":"DatasetInputConfig", + "documentation":"

A DatasetInputConfig object that defines the data source and schema mapping.

" } }, "documentation":"

Defines where the training dataset is located, what type of data it contains, and how to access the data.

" @@ -1055,17 +1063,17 @@ "DatasetInputConfig":{ "type":"structure", "required":[ - "dataSource", - "schema" + "schema", + "dataSource" ], "members":{ - "dataSource":{ - "shape":"DataSource", - "documentation":"

A DataSource object that specifies the Glue data source for the training data.

" - }, "schema":{ "shape":"DatasetInputConfigSchemaList", "documentation":"

The schema information for the training data.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

A DataSource object that specifies the Glue data source for the training data.

" } }, "documentation":"

Defines the Glue data source and schema mapping information.

" @@ -1163,69 +1171,73 @@ "GetAudienceGenerationJobResponse":{ "type":"structure", "required":[ - "audienceGenerationJobArn", - "configuredAudienceModelArn", "createTime", + "updateTime", + "audienceGenerationJobArn", "name", "status", - "updateTime" + "configuredAudienceModelArn" ], "members":{ + "createTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The time at which the audience generation job was created.

" + }, + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the audience generation job was updated.

" + }, "audienceGenerationJobArn":{ "shape":"AudienceGenerationJobArn", "documentation":"

The Amazon Resource Name (ARN) of the audience generation job.

" }, - "collaborationId":{ - "shape":"UUID", - "documentation":"

The identifier of the collaboration that this audience generation job is associated with.

" + "name":{ + "shape":"NameString", + "documentation":"

The name of the audience generation job.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience generation job.

" + }, + "status":{ + "shape":"AudienceGenerationJobStatus", + "documentation":"

The status of the audience generation job.

" + }, + "statusDetails":{ + "shape":"StatusDetails", + "documentation":"

Details about the status of the audience generation job.

" }, "configuredAudienceModelArn":{ "shape":"ConfiguredAudienceModelArn", "documentation":"

The Amazon Resource Name (ARN) of the configured audience model used for this audience generation job.

" }, - "createTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The time at which the audience generation job was created.

" - }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience generation job.

" + "seedAudience":{ + "shape":"AudienceGenerationJobDataSource", + "documentation":"

The seed audience that was used for this audience generation job. This field will be null if the account calling the API is the account that started this audience generation job.

" }, "includeSeedInOutput":{ "shape":"Boolean", "documentation":"

Configure whether the seed users are included in the output audience. By default, Clean Rooms ML removes seed users from the output audience. If you specify TRUE, the seed users will appear first in the output. Clean Rooms ML does not explicitly reveal whether a user was in the seed, but the recipient of the audience will know that the first minimumSeedSize count of users are from the seed.

" }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The identifier of the collaboration that this audience generation job is associated with.

" + }, "metrics":{ "shape":"AudienceQualityMetrics", "documentation":"

The relevance scores for different audience sizes and the recall score of the generated audience.

" }, - "name":{ - "shape":"NameString", - "documentation":"

The name of the audience generation job.

" - }, - "seedAudience":{ - "shape":"AudienceGenerationJobDataSource", - "documentation":"

The seed audience that was used for this audience generation job. This field will be null if the account calling the API is the account that started this audience generation job.

" - }, "startedBy":{ "shape":"AccountId", "documentation":"

The AWS account that started this audience generation job.

" }, - "status":{ - "shape":"AudienceGenerationJobStatus", - "documentation":"

The status of the audience generation job.

" - }, - "statusDetails":{ - "shape":"StatusDetails", - "documentation":"

Details about the status of the audience generation job.

" - }, "tags":{ "shape":"TagMap", "documentation":"

The tags that are associated to this audience generation job.

" }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the audience generation job was updated.

" + "protectedQueryIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of the protected query for this audience generation job.

" } } }, @@ -1244,34 +1256,42 @@ "GetAudienceModelResponse":{ "type":"structure", "required":[ - "audienceModelArn", "createTime", + "updateTime", + "audienceModelArn", "name", - "status", "trainingDatasetArn", - "updateTime" + "status" ], "members":{ - "audienceModelArn":{ - "shape":"AudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience model.

" - }, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the audience model was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience model.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the audience model was updated.

" }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

The KMS key ARN used for the audience model.

" + "trainingDataStartTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The start date specified for the training window.

" + }, + "trainingDataEndTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The end date specified for the training window.

" + }, + "audienceModelArn":{ + "shape":"AudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience model.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the audience model.

" }, + "trainingDatasetArn":{ + "shape":"TrainingDatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the training dataset that was used for this audience model.

" + }, "status":{ "shape":"AudienceModelStatus", "documentation":"

The status of the audience model.

" @@ -1280,25 +1300,17 @@ "shape":"StatusDetails", "documentation":"

Details about the status of the audience model.

" }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The KMS key ARN used for the audience model.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags that are assigned to the audience model.

" }, - "trainingDataEndTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The end date specified for the training window.

" - }, - "trainingDataStartTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The start date specified for the training window.

" - }, - "trainingDatasetArn":{ - "shape":"TrainingDatasetArn", - "documentation":"

The Amazon Resource Name (ARN) of the training dataset that was used for this audience model.

" - }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the audience model was updated.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience model.

" } } }, @@ -1351,67 +1363,67 @@ "GetConfiguredAudienceModelResponse":{ "type":"structure", "required":[ - "audienceModelArn", - "configuredAudienceModelArn", "createTime", + "updateTime", + "configuredAudienceModelArn", "name", + "audienceModelArn", "outputConfig", - "sharedAudienceMetrics", "status", - "updateTime" + "sharedAudienceMetrics" ], "members":{ - "audienceModelArn":{ - "shape":"AudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience model used for this configured audience model.

" - }, - "audienceSizeConfig":{ - "shape":"AudienceSizeConfig", - "documentation":"

The list of output sizes of audiences that can be created using this configured audience model. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" - }, - "childResourceTagOnCreatePolicy":{ - "shape":"TagOnCreatePolicy", - "documentation":"

Provides the childResourceTagOnCreatePolicy that was used for this configured audience model.

" - }, - "configuredAudienceModelArn":{ - "shape":"ConfiguredAudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the configured audience model.

" - }, "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the configured audience model was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the configured audience model.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the configured audience model was updated.

" }, - "minMatchingSeedSize":{ - "shape":"MinMatchingSeedSize", - "documentation":"

The minimum number of users from the seed audience that must match with users in the training data of the audience model.

" + "configuredAudienceModelArn":{ + "shape":"ConfiguredAudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the configured audience model.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the configured audience model.

" }, + "audienceModelArn":{ + "shape":"AudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience model used for this configured audience model.

" + }, "outputConfig":{ "shape":"ConfiguredAudienceModelOutputConfig", "documentation":"

The output configuration of the configured audience model

" }, - "sharedAudienceMetrics":{ - "shape":"MetricsList", - "documentation":"

Whether audience metrics are shared.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the configured audience model.

" }, "status":{ "shape":"ConfiguredAudienceModelStatus", "documentation":"

The status of the configured audience model.

" }, + "sharedAudienceMetrics":{ + "shape":"MetricsList", + "documentation":"

Whether audience metrics are shared.

" + }, + "minMatchingSeedSize":{ + "shape":"MinMatchingSeedSize", + "documentation":"

The minimum number of users from the seed audience that must match with users in the training data of the audience model.

" + }, + "audienceSizeConfig":{ + "shape":"AudienceSizeConfig", + "documentation":"

The list of output sizes of audiences that can be created using this configured audience model. A request to StartAudienceGenerationJob that uses this configured audience model must have an audienceSize selected from this list. You can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of identifiers in the output. You can use the Percentage AudienceSize to configure sizes in the range 1-100 percent.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags that are associated to this configured audience model.

" }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the configured audience model was updated.

" + "childResourceTagOnCreatePolicy":{ + "shape":"TagOnCreatePolicy", + "documentation":"

Provides the childResourceTagOnCreatePolicy that was used for this configured audience model.

" } } }, @@ -1431,70 +1443,70 @@ "type":"structure", "required":[ "createTime", + "updateTime", + "trainingDatasetArn", "name", - "roleArn", - "status", "trainingData", - "trainingDatasetArn", - "updateTime" + "status", + "roleArn" ], "members":{ "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the training dataset was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the training dataset.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the training dataset was updated.

" + }, + "trainingDatasetArn":{ + "shape":"TrainingDatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the training dataset.

" }, "name":{ "shape":"NameString", "documentation":"

The name of the training dataset.

" }, - "roleArn":{ - "shape":"IamRoleArn", - "documentation":"

The IAM role used to read the training data.

" + "trainingData":{ + "shape":"DatasetList", + "documentation":"

Metadata about the requested training data.

" }, "status":{ "shape":"TrainingDatasetStatus", "documentation":"

The status of the training dataset.

" }, + "roleArn":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role used to read the training data.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags that are assigned to this training dataset.

" }, - "trainingData":{ - "shape":"DatasetList", - "documentation":"

Metadata about the requested training data.

" - }, - "trainingDatasetArn":{ - "shape":"TrainingDatasetArn", - "documentation":"

The Amazon Resource Name (ARN) of the training dataset.

" - }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the training dataset was updated.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the training dataset.

" } } }, "GlueDataSource":{ "type":"structure", "required":[ - "databaseName", - "tableName" + "tableName", + "databaseName" ], "members":{ - "catalogId":{ - "shape":"AccountId", - "documentation":"

The Glue catalog that contains the training data.

" + "tableName":{ + "shape":"GlueTableName", + "documentation":"

The Glue table that contains the training data.

" }, "databaseName":{ "shape":"GlueDatabaseName", "documentation":"

The Glue database that contains the training data.

" }, - "tableName":{ - "shape":"GlueTableName", - "documentation":"

The Glue table that contains the training data.

" + "catalogId":{ + "shape":"AccountId", + "documentation":"

The Glue catalog that contains the training data.

" } }, "documentation":"

Defines the Glue data source that contains the training data.

" @@ -1503,40 +1515,40 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_](([a-zA-Z0-9_]+-)*([a-zA-Z0-9_]+))?$" + "pattern":"[a-zA-Z0-9_](([a-zA-Z0-9_]+-)*([a-zA-Z0-9_]+))?" }, "GlueTableName":{ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?$" + "pattern":"[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?" }, "Hash":{ "type":"string", "max":128, "min":64, - "pattern":"^[0-9a-f]+$" + "pattern":"[0-9a-f]+" }, "IamRoleArn":{ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:iam::[0-9]{12}:role/.+$" + "pattern":"arn:aws[-a-z]*:iam::[0-9]{12}:role/.+" }, "KmsKeyArn":{ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:kms:[-a-z0-9]+:[0-9]{12}:key/.+$" + "pattern":"arn:aws[-a-z]*:kms:[-a-z0-9]+:[0-9]{12}:key/.+" }, "ListAudienceExportJobsRequest":{ "type":"structure", "members":{ - "audienceGenerationJobArn":{ - "shape":"AudienceGenerationJobArn", - "documentation":"

The Amazon Resource Name (ARN) of the audience generation job that you are interested in.

", + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

", "location":"querystring", - "locationName":"audienceGenerationJobArn" + "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", @@ -1544,11 +1556,11 @@ "location":"querystring", "locationName":"maxResults" }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "audienceGenerationJobArn":{ + "shape":"AudienceGenerationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the audience generation job that you are interested in.

", "location":"querystring", - "locationName":"nextToken" + "locationName":"audienceGenerationJobArn" } } }, @@ -1556,30 +1568,24 @@ "type":"structure", "required":["audienceExportJobs"], "members":{ - "audienceExportJobs":{ - "shape":"AudienceExportJobList", - "documentation":"

The audience export jobs that match the request.

" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "audienceExportJobs":{ + "shape":"AudienceExportJobList", + "documentation":"

The audience export jobs that match the request.

" } } }, "ListAudienceGenerationJobsRequest":{ "type":"structure", "members":{ - "collaborationId":{ - "shape":"UUID", - "documentation":"

The identifier of the collaboration that contains the audience generation jobs that you are interested in.

", - "location":"querystring", - "locationName":"collaborationId" - }, - "configuredAudienceModelArn":{ - "shape":"ConfiguredAudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in.

", + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

", "location":"querystring", - "locationName":"configuredAudienceModelArn" + "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", @@ -1587,11 +1593,17 @@ "location":"querystring", "locationName":"maxResults" }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "configuredAudienceModelArn":{ + "shape":"ConfiguredAudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in.

", "location":"querystring", - "locationName":"nextToken" + "locationName":"configuredAudienceModelArn" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The identifier of the collaboration that contains the audience generation jobs that you are interested in.

", + "location":"querystring", + "locationName":"collaborationId" } } }, @@ -1599,30 +1611,30 @@ "type":"structure", "required":["audienceGenerationJobs"], "members":{ - "audienceGenerationJobs":{ - "shape":"AudienceGenerationJobList", - "documentation":"

The audience generation jobs that match the request.

" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "audienceGenerationJobs":{ + "shape":"AudienceGenerationJobList", + "documentation":"

The audience generation jobs that match the request.

" } } }, "ListAudienceModelsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

", "location":"querystring", "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call.

", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -1630,30 +1642,30 @@ "type":"structure", "required":["audienceModels"], "members":{ - "audienceModels":{ - "shape":"AudienceModelList", - "documentation":"

The audience models that match the request.

" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "audienceModels":{ + "shape":"AudienceModelList", + "documentation":"

The audience models that match the request.

" } } }, "ListConfiguredAudienceModelsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

", "location":"querystring", "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call.

", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -1661,13 +1673,13 @@ "type":"structure", "required":["configuredAudienceModels"], "members":{ - "configuredAudienceModels":{ - "shape":"ConfiguredAudienceModelList", - "documentation":"

The configured audience models.

" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "configuredAudienceModels":{ + "shape":"ConfiguredAudienceModelList", + "documentation":"

The configured audience models.

" } } }, @@ -1696,17 +1708,17 @@ "ListTrainingDatasetsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"NextToken", "documentation":"

The token value retrieved from a previous call to access the next page of results.

", "location":"querystring", "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of the results that is returned per call.

", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -1746,13 +1758,29 @@ "type":"string", "max":63, "min":1, - "pattern":"^(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*$" + "pattern":"(?!\\s*$)[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*" }, "NextToken":{ "type":"string", "max":10240, "min":1 }, + "ParameterKey":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[0-9a-zA-Z_]+" + }, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterKey"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterValue":{ + "type":"string", + "max":250, + "min":0 + }, "PolicyExistenceCondition":{ "type":"string", "enum":[ @@ -1760,6 +1788,30 @@ "POLICY_MUST_NOT_EXIST" ] }, + "ProtectedQuerySQLParameters":{ + "type":"structure", + "members":{ + "queryString":{ + "shape":"ProtectedQuerySQLParametersQueryStringString", + "documentation":"

The query string to be submitted.

" + }, + "analysisTemplateArn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.

" + }, + "parameters":{ + "shape":"ParameterMap", + "documentation":"

The protected query SQL parameters.

" + } + }, + "documentation":"

The parameters for the SQL type Protected Query.

", + "sensitive":true + }, + "ProtectedQuerySQLParametersQueryStringString":{ + "type":"string", + "max":90000, + "min":0 + }, "PutConfiguredAudienceModelPolicyRequest":{ "type":"structure", "required":[ @@ -1777,13 +1829,13 @@ "shape":"ResourcePolicy", "documentation":"

The IAM resource policy.

" }, - "policyExistenceCondition":{ - "shape":"PolicyExistenceCondition", - "documentation":"

Use this to prevent unexpected concurrent modification of the policy.

" - }, "previousPolicyHash":{ "shape":"Hash", "documentation":"

A cryptographic hash of the contents of the policy used to prevent unexpected concurrent modification of the policy.

" + }, + "policyExistenceCondition":{ + "shape":"PolicyExistenceCondition", + "documentation":"

Use this to prevent unexpected concurrent modification of the policy.

" } } }, @@ -1824,7 +1876,7 @@ "type":"string", "max":255, "min":0, - "pattern":"^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*$" + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*" }, "ResourceNotFoundException":{ "type":"structure", @@ -1859,7 +1911,7 @@ "type":"string", "max":1285, "min":1, - "pattern":"^s3://.+$" + "pattern":"s3://.+" }, "ServiceQuotaExceededException":{ "type":"structure", @@ -1884,11 +1936,15 @@ "StartAudienceExportJobRequest":{ "type":"structure", "required":[ + "name", "audienceGenerationJobArn", - "audienceSize", - "name" + "audienceSize" ], "members":{ + "name":{ + "shape":"NameString", + "documentation":"

The name of the audience export job.

" + }, "audienceGenerationJobArn":{ "shape":"AudienceGenerationJobArn", "documentation":"

The Amazon Resource Name (ARN) of the audience generation job that you want to export.

" @@ -1897,44 +1953,40 @@ "description":{ "shape":"ResourceDescription", "documentation":"

The description of the audience export job.

" - }, - "name":{ - "shape":"NameString", - "documentation":"

The name of the audience export job.

" } } }, "StartAudienceGenerationJobRequest":{ "type":"structure", "required":[ - "configuredAudienceModelArn", "name", + "configuredAudienceModelArn", "seedAudience" ], "members":{ - "collaborationId":{ - "shape":"UUID", - "documentation":"

The identifier of the collaboration that contains the audience generation job.

" + "name":{ + "shape":"NameString", + "documentation":"

The name of the audience generation job.

" }, "configuredAudienceModelArn":{ "shape":"ConfiguredAudienceModelArn", "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that is used for this audience generation job.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the audience generation job.

" + "seedAudience":{ + "shape":"AudienceGenerationJobDataSource", + "documentation":"

The seed audience that is used to generate the audience.

" }, "includeSeedInOutput":{ "shape":"Boolean", "documentation":"

Whether the seed audience is included in the audience generation output.

" }, - "name":{ - "shape":"NameString", - "documentation":"

The name of the audience generation job.

" + "collaborationId":{ + "shape":"UUID", + "documentation":"

The identifier of the collaboration that contains the audience generation job.

" }, - "seedAudience":{ - "shape":"AudienceGenerationJobDataSource", - "documentation":"

The seed audience that is used to generate the audience.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the audience generation job.

" }, "tags":{ "shape":"TagMap", @@ -1955,13 +2007,13 @@ "StatusDetails":{ "type":"structure", "members":{ - "message":{ - "shape":"String", - "documentation":"

The error message that was returned. The message is intended for human consumption and can change at any time. Use the statusCode for programmatic error handling.

" - }, "statusCode":{ "shape":"String", "documentation":"

The status code that was returned. The status code is intended for programmatic error handling. Clean Rooms ML will not change the status code for existing error conditions.

" + }, + "message":{ + "shape":"String", + "documentation":"

The error message that was returned. The message is intended for human consumption and can change at any time. Use the statusCode for programmatic error handling.

" } }, "documentation":"

Details about the status of a resource.

" @@ -2029,13 +2081,13 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(training-dataset|audience-model|configured-audience-model|audience-generation-job)/[-a-zA-Z0-9_/.]+$" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(training-dataset|audience-model|configured-audience-model|audience-generation-job)/[-a-zA-Z0-9_/.]+" }, "TrainingDatasetArn":{ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:training-dataset/[-a-zA-Z0-9_/.]+$" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:training-dataset/[-a-zA-Z0-9_/.]+" }, "TrainingDatasetList":{ "type":"list", @@ -2049,19 +2101,23 @@ "type":"structure", "required":[ "createTime", - "name", - "status", + "updateTime", "trainingDatasetArn", - "updateTime" + "name", + "status" ], "members":{ "createTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time at which the training dataset was created.

" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The description of the training dataset.

" + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The most recent time at which the training dataset was updated.

" + }, + "trainingDatasetArn":{ + "shape":"TrainingDatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the training dataset.

" }, "name":{ "shape":"NameString", @@ -2071,13 +2127,9 @@ "shape":"TrainingDatasetStatus", "documentation":"

The status of the training dataset.

" }, - "trainingDatasetArn":{ - "shape":"TrainingDatasetArn", - "documentation":"

The Amazon Resource Name (ARN) of the training dataset.

" - }, - "updateTime":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The most recent time at which the training dataset was updated.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the training dataset.

" } }, "documentation":"

Provides information about the training dataset.

" @@ -2086,7 +2138,7 @@ "type":"string", "max":36, "min":36, - "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" }, "UntagResourceRequest":{ "type":"structure", @@ -2118,35 +2170,35 @@ "type":"structure", "required":["configuredAudienceModelArn"], "members":{ - "audienceModelArn":{ - "shape":"AudienceModelArn", - "documentation":"

The Amazon Resource Name (ARN) of the new audience model that you want to use.

" - }, - "audienceSizeConfig":{ - "shape":"AudienceSizeConfig", - "documentation":"

The new audience size configuration.

" - }, "configuredAudienceModelArn":{ "shape":"ConfiguredAudienceModelArn", "documentation":"

The Amazon Resource Name (ARN) of the configured audience model that you want to update.

", "location":"uri", "locationName":"configuredAudienceModelArn" }, - "description":{ - "shape":"ResourceDescription", - "documentation":"

The new description of the configured audience model.

" - }, - "minMatchingSeedSize":{ - "shape":"MinMatchingSeedSize", - "documentation":"

The minimum number of users from the seed audience that must match with users in the training data of the audience model.

" - }, "outputConfig":{ "shape":"ConfiguredAudienceModelOutputConfig", "documentation":"

The new output configuration.

" }, + "audienceModelArn":{ + "shape":"AudienceModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the new audience model that you want to use.

" + }, "sharedAudienceMetrics":{ "shape":"MetricsList", "documentation":"

The new value for whether to share audience metrics.

" + }, + "minMatchingSeedSize":{ + "shape":"MinMatchingSeedSize", + "documentation":"

The minimum number of users from the seed audience that must match with users in the training data of the audience model.

" + }, + "audienceSizeConfig":{ + "shape":"AudienceSizeConfig", + "documentation":"

The new audience size configuration.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The new description of the configured audience model.

" } } }, diff --git a/botocore/data/cleanroomsml/2023-09-06/waiters-2.json b/botocore/data/cleanroomsml/2023-09-06/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/cleanroomsml/2023-09-06/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/cloudformation/2010-05-15/service-2.json b/botocore/data/cloudformation/2010-05-15/service-2.json index e5aac03c4d..4d2e5dc771 100644 --- a/botocore/data/cloudformation/2010-05-15/service-2.json +++ b/botocore/data/cloudformation/2010-05-15/service-2.json @@ -4,6 +4,7 @@ "apiVersion":"2010-05-15", "endpointPrefix":"cloudformation", "protocol":"query", + "protocols":["query"], "serviceFullName":"AWS CloudFormation", "serviceId":"CloudFormation", "signatureVersion":"v4", @@ -2359,6 +2360,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that CloudFormation successfully received them.

All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + }, + "DeletionMode":{ + "shape":"DeletionMode", + "documentation":"

Specifies the deletion mode for the stack. Possible values are:

  • STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter.

  • FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.

" } }, "documentation":"

The input for DeleteStack action.

" @@ -2434,6 +2439,13 @@ "members":{ } }, + "DeletionMode":{ + "type":"string", + "enum":[ + "STANDARD", + "FORCE_DELETE_STACK" + ] + }, "DeletionTime":{"type":"timestamp"}, "DeploymentTargets":{ "type":"structure", @@ -4250,7 +4262,7 @@ "members":{ "Summaries":{ "shape":"StackInstanceResourceDriftsSummaries", - "documentation":"

A list of StackInstanceResourceDriftSummary structures that contain information about the specified stack instances.

" + "documentation":"

A list of StackInstanceResourceDriftsSummary structures that contain information about the specified stack instances.

" }, "NextToken":{ "shape":"NextToken", @@ -6118,6 +6130,10 @@ "shape":"RetainExceptOnCreate", "documentation":"

When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain.

Default: false

" }, + "DeletionMode":{ + "shape":"DeletionMode", + "documentation":"

Specifies the deletion mode for the stack. Possible values are:

  • STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter.

  • FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.

" + }, "DetailedStatus":{ "shape":"DetailedStatus", "documentation":"

The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide.

" @@ -7097,7 +7113,7 @@ }, "ConcurrencyMode":{ "shape":"ConcurrencyMode", - "documentation":"

Specifies how the concurrency level behaves during the operation execution.

  • STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of MaxConcurrentCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior.

    If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar.

  • SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures.

" + "documentation":"

Specifies how the concurrency level behaves during the operation execution.

  • STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of FailureToleranceCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior.

    If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar.

  • SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures.

" } }, "documentation":"

The user-specified preferences for how CloudFormation performs a stack set operation.

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

" diff --git a/botocore/data/cloudfront/2020-05-31/service-2.json b/botocore/data/cloudfront/2020-05-31/service-2.json index 7192db9029..475f174485 100644 --- a/botocore/data/cloudfront/2020-05-31/service-2.json +++ b/botocore/data/cloudfront/2020-05-31/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"cloudfront", "globalEndpoint":"cloudfront.amazonaws.com", "protocol":"rest-xml", + "protocols":["rest-xml"], "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", "serviceId":"CloudFront", "signatureVersion":"v4", - "uid":"cloudfront-2020-05-31" + "uid":"cloudfront-2020-05-31", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAlias":{ @@ -1662,7 +1664,7 @@ {"shape":"InvalidTagging"}, {"shape":"NoSuchResource"} ], - "documentation":"

List tags for a CloudFront resource.

" + "documentation":"

List tags for a CloudFront resource. For more information, see Tagging a distribution in the Amazon CloudFront Developer Guide.

" }, "PublishFunction":{ "name":"PublishFunction2020_05_31", @@ -1695,7 +1697,7 @@ {"shape":"InvalidTagging"}, {"shape":"NoSuchResource"} ], - "documentation":"

Add tags to a CloudFront resource.

" + "documentation":"

Add tags to a CloudFront resource. For more information, see Tagging a distribution in the Amazon CloudFront Developer Guide.

" }, "TestFunction":{ "name":"TestFunction2020_05_31", @@ -1732,7 +1734,7 @@ {"shape":"InvalidTagging"}, {"shape":"NoSuchResource"} ], - "documentation":"

Remove tags from a CloudFront resource.

" + "documentation":"

Remove tags from a CloudFront resource. For more information, see Tagging a distribution in the Amazon CloudFront Developer Guide.

" }, "UpdateCachePolicy":{ "name":"UpdateCachePolicy2020_05_31", @@ -2463,7 +2465,7 @@ "deprecated":true } }, - "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to serve objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current quota (formerly known as limit) on the number of cache behaviors that you can add to a distribution, see Quotas in the Amazon CloudFront Developer Guide.

If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. For more information, see CacheBehaviors. Don't include an empty CacheBehavior element because this is invalid.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" + "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to serve objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current quota (formerly known as limit) on the number of cache behaviors that you can add to a distribution, see Quotas in the Amazon CloudFront Developer Guide.

If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't specify an empty individual CacheBehavior element, because this is invalid. For more information, see CacheBehaviors.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" }, "CacheBehaviorList":{ "type":"list", @@ -2697,7 +2699,7 @@ }, "Items":{ "shape":"MethodsList", - "documentation":"

A complex type that contains the HTTP methods that you want CloudFront to cache responses to.

" + "documentation":"

A complex type that contains the HTTP methods that you want CloudFront to cache responses to. Valid values for CachedMethods include GET, HEAD, and OPTIONS, depending on which caching option you choose. For more information, see the preceding section.

" } }, "documentation":"

A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices:

  • CloudFront caches responses to GET and HEAD requests.

  • CloudFront caches responses to GET, HEAD, and OPTIONS requests.

If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.

" diff --git a/botocore/data/cloudhsmv2/2017-04-28/endpoint-rule-set-1.json b/botocore/data/cloudhsmv2/2017-04-28/endpoint-rule-set-1.json index e866a7977e..84e75cdaf0 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/endpoint-rule-set-1.json +++ b/botocore/data/cloudhsmv2/2017-04-28/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -351,9 +349,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - 
] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json index 02c5960077..18bbecd93b 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/service-2.json +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"cloudhsmv2", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CloudHSM V2", "serviceFullName":"AWS CloudHSM V2", "serviceId":"CloudHSM V2", "signatureVersion":"v4", "signingName":"cloudhsm", "targetPrefix":"BaldrApiService", - "uid":"cloudhsmv2-2017-04-28" + "uid":"cloudhsmv2-2017-04-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "CopyBackupToRegion":{ @@ -30,7 +32,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Copy an AWS CloudHSM cluster backup to a different region.

" + "documentation":"

Copy an CloudHSM cluster backup to a different region.

Cross-account use: No. You cannot perform this operation on an CloudHSM backup in a different Amazon Web Services account.

" }, "CreateCluster":{ "name":"CreateCluster", @@ -48,7 +50,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Creates a new AWS CloudHSM cluster.

" + "documentation":"

Creates a new CloudHSM cluster.

Cross-account use: Yes. To perform this operation with an CloudHSM backup in a different AWS account, specify the full backup ARN in the value of the SourceBackupId parameter.

" }, "CreateHsm":{ "name":"CreateHsm", @@ -65,7 +67,7 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmAccessDeniedException"} ], - "documentation":"

Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster.

" + "documentation":"

Creates a new hardware security module (HSM) in the specified CloudHSM cluster.

Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a different Amazon Web Services account.

" }, "DeleteBackup":{ "name":"DeleteBackup", @@ -82,7 +84,7 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"} ], - "documentation":"

Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup.

" + "documentation":"

Deletes a specified CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup.

Cross-account use: No. You cannot perform this operation on an CloudHSM backup in a different Amazon Web Services account.

" }, "DeleteCluster":{ "name":"DeleteCluster", @@ -100,7 +102,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm.

" + "documentation":"

Deletes the specified CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm.

Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a different Amazon Web Services account.

" }, "DeleteHsm":{ "name":"DeleteHsm", @@ -117,7 +119,24 @@ {"shape":"CloudHsmInvalidRequestException"}, {"shape":"CloudHsmAccessDeniedException"} ], - "documentation":"

Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters.

" + "documentation":"

Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters.

Cross-account use: No. You cannot perform this operation on an CloudHSM HSM in a different Amazon Web Services account.

" + }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "output":{"shape":"DeleteResourcePolicyResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Deletes an CloudHSM resource policy. Deleting a resource policy will result in the resource being unshared and removed from any RAM resource shares. Deleting the resource policy attached to a backup will not impact any clusters created from that backup.

Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web Services account.

" }, "DescribeBackups":{ "name":"DescribeBackups", @@ -135,7 +154,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Gets information about backups of AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get.

" + "documentation":"

Gets information about backups of CloudHSM clusters. Lists either the backups you own or the backups shared with you when the Shared parameter is true.

This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get.

Cross-account use: Yes. Customers can describe backups in other Amazon Web Services accounts that are shared with them.

" }, "DescribeClusters":{ "name":"DescribeClusters", @@ -152,7 +171,24 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Gets information about AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get.

" + "documentation":"

Gets information about CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get.

Cross-account use: No. You cannot perform this operation on CloudHSM clusters in a different Amazon Web Services account.

" + }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourcePolicyRequest"}, + "output":{"shape":"GetResourcePolicyResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Retrieves the resource policy document attached to a given resource.

Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web Services account.

" }, "InitializeCluster":{ "name":"InitializeCluster", @@ -169,7 +205,7 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"} ], - "documentation":"

Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters.

" + "documentation":"

Claims an CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters.

Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a different Amazon Web Services account.

" }, "ListTags":{ "name":"ListTags", @@ -187,7 +223,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Gets a list of tags for the specified AWS CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

" + "documentation":"

Gets a list of tags for the specified CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web Services account.

" }, "ModifyBackupAttributes":{ "name":"ModifyBackupAttributes", @@ -204,7 +240,7 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"} ], - "documentation":"

Modifies attributes for AWS CloudHSM backup.

" + "documentation":"

Modifies attributes for CloudHSM backup.

Cross-account use: No. You cannot perform this operation on an CloudHSM backup in a different Amazon Web Services account.

" }, "ModifyCluster":{ "name":"ModifyCluster", @@ -221,7 +257,24 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"} ], - "documentation":"

Modifies AWS CloudHSM cluster.

" + "documentation":"

Modifies CloudHSM cluster.

Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a different Amazon Web Services account.

" + }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Creates or updates a CloudHSM resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your CloudHSM resources. The following resources support CloudHSM resource policies:

  • Backup - The resource policy allows you to describe the backup and restore a cluster from the backup in another Amazon Web Services account.

In order to share a backup, it must be in a 'READY' state and you must own it.

While you can share a backup using the CloudHSM PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. Using RAM provides multiple benefits as it creates the policy for you, allows multiple resources to be shared at one time, and increases the discoverability of shared resources. If you use PutResourcePolicy and want consumers to be able to describe the backups you share with them, you must promote the backup to a standard RAM Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation. For more information, see Working with shared backups in the CloudHSM User Guide

Cross-account use: No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web Services account.

" }, "RestoreBackup":{ "name":"RestoreBackup", @@ -238,7 +291,7 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"} ], - "documentation":"

Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For mor information on deleting a backup, see DeleteBackup.

" + "documentation":"

Restores a specified CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup.

Cross-account use: No. You cannot perform this operation on a CloudHSM backup in a different Amazon Web Services account.

" }, "TagResource":{ "name":"TagResource", @@ -256,7 +309,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.

" + "documentation":"

Adds or overwrites one or more tags for the specified CloudHSM cluster.

Cross-account use: No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web Services account.

" }, "UntagResource":{ "name":"UntagResource", @@ -274,7 +327,7 @@ {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} ], - "documentation":"

Removes the specified tag or tags from the specified AWS CloudHSM cluster.

" + "documentation":"

Removes the specified tag or tags from the specified CloudHSM cluster.

Cross-account use: No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web Services account.

" } }, "shapes":{ @@ -286,6 +339,10 @@ "shape":"BackupId", "documentation":"

The identifier (ID) of the backup.

" }, + "BackupArn":{ + "shape":"BackupArn", + "documentation":"

The Amazon Resource Name (ARN) of the backup.

" + }, "BackupState":{ "shape":"BackupState", "documentation":"

The state of the backup.

" @@ -325,9 +382,21 @@ "TagList":{ "shape":"TagList", "documentation":"

The list of tags for the backup.

" + }, + "HsmType":{ + "shape":"HsmType", + "documentation":"

The HSM type used to create the backup.

" + }, + "Mode":{ + "shape":"ClusterMode", + "documentation":"

The mode of the cluster that was backed up.

" } }, - "documentation":"

Contains information about a backup of an AWS CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion parameters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" + "documentation":"

Contains information about a backup of a CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion parameters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" + }, + "BackupArn":{ + "type":"string", + "pattern":"^(arn:aws(-(us-gov))?:cloudhsm:([a-z]{2}(-(gov|isob|iso))?-(east|west|north|south|central){1,2}-[0-9]{1}):[0-9]{12}:backup/)?backup-[2-7a-zA-Z]{11,16}" }, "BackupId":{ "type":"string", @@ -382,7 +451,7 @@ "Boolean":{"type":"boolean"}, "Cert":{ "type":"string", - "max":5000, + "max":20000, "pattern":"[a-zA-Z0-9+-/=\\s]*" }, "Certificates":{ @@ -398,7 +467,7 @@ }, "AwsHardwareCertificate":{ "shape":"Cert", - "documentation":"

The HSM hardware certificate issued (signed) by AWS CloudHSM.

" + "documentation":"

The HSM hardware certificate issued (signed) by CloudHSM.

" }, "ManufacturerHardwareCertificate":{ "shape":"Cert", @@ -419,12 +488,16 @@ "documentation":"

The request was rejected because the requester does not have permission to perform the requested operation.

", "exception":true }, + "CloudHsmArn":{ + "type":"string", + "pattern":"arn:aws(-(us-gov))?:cloudhsm:([a-z]{2}(-(gov|isob|iso))?-(east|west|north|south|central){1,2}-[0-9]{1}):[0-9]{12}:(backup/backup|cluster/cluster|hsm/hsm)-[2-7a-zA-Z]{11,16}" + }, "CloudHsmInternalFailureException":{ "type":"structure", "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"

The request was rejected because of an AWS CloudHSM internal failure. The request can be retried.

", + "documentation":"

The request was rejected because of a CloudHSM internal failure. The request can be retried.

", "exception":true, "fault":true }, @@ -522,14 +595,25 @@ "TagList":{ "shape":"TagList", "documentation":"

The list of tags for the cluster.

" + }, + "Mode":{ + "shape":"ClusterMode", + "documentation":"

The mode of the cluster.

" } }, - "documentation":"

Contains information about an AWS CloudHSM cluster.

" + "documentation":"

Contains information about a CloudHSM cluster.

" }, "ClusterId":{ "type":"string", "pattern":"cluster-[2-7a-zA-Z]{11,16}" }, + "ClusterMode":{ + "type":"string", + "enum":[ + "FIPS", + "NON_FIPS" + ] + }, "ClusterState":{ "type":"string", "enum":[ @@ -596,11 +680,11 @@ }, "HsmType":{ "shape":"HsmType", - "documentation":"

The type of HSM to use in the cluster. Currently the only allowed value is hsm1.medium.

" + "documentation":"

The type of HSM to use in the cluster. The allowed values are hsm1.medium and hsm2m.medium.

" }, "SourceBackupId":{ - "shape":"BackupId", - "documentation":"

The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups.

" + "shape":"BackupArn", + "documentation":"

The identifier (ID) or the Amazon Resource Name (ARN) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in another account, the full ARN must be supplied.

" }, "SubnetIds":{ "shape":"SubnetIds", @@ -609,6 +693,10 @@ "TagList":{ "shape":"TagList", "documentation":"

Tags to apply to the CloudHSM cluster during creation.

" + }, + "Mode":{ + "shape":"ClusterMode", + "documentation":"

The mode to use in the cluster. The allowed values are FIPS and NON_FIPS.

" } } }, @@ -720,6 +808,28 @@ } } }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"CloudHsmArn", + "documentation":"

Amazon Resource Name (ARN) of the resource from which the policy will be removed.

" + } + } + }, + "DeleteResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"CloudHsmArn", + "documentation":"

Amazon Resource Name (ARN) of the resource from which the policy was deleted.

" + }, + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"

The policy previously attached to the resource.

" + } + } + }, "DescribeBackupsRequest":{ "type":"structure", "members":{ @@ -735,6 +845,10 @@ "shape":"Filters", "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

Use the neverExpires filter to return backups filtered by the value in the neverExpires parameter. True returns all backups exempt from the backup retention policy. False returns all backups with a backup retention policy defined at the cluster.

" }, + "Shared":{ + "shape":"Boolean", + "documentation":"

Describe backups that are shared with you.

By default when using this option, the command returns backups that have been shared using a standard Resource Access Manager resource share. In order for a backup that was shared using the PutResourcePolicy command to be returned, the share must be promoted to a standard resource share using the RAM PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing backups, see Working with shared backups in the CloudHSM User Guide.

" + }, "SortAscending":{ "shape":"Boolean", "documentation":"

Designates whether or not to sort the return backups by ascending chronological order of generation.

" @@ -826,7 +940,26 @@ "Filters":{ "type":"map", "key":{"shape":"Field"}, - "value":{"shape":"Strings"} + "value":{"shape":"Strings"}, + "max":30 + }, + "GetResourcePolicyRequest":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"CloudHsmArn", + "documentation":"

Amazon Resource Name (ARN) of the resource to which a policy is attached.

" + } + } + }, + "GetResourcePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"

The policy attached to a resource.

" + } + } }, "Hsm":{ "type":"structure", @@ -865,7 +998,7 @@ "documentation":"

A description of the HSM's state.

" } }, - "documentation":"

Contains information about a hardware security module (HSM) in an AWS CloudHSM cluster.

" + "documentation":"

Contains information about a hardware security module (HSM) in a CloudHSM cluster.

" }, "HsmId":{ "type":"string", @@ -883,7 +1016,8 @@ }, "HsmType":{ "type":"string", - "pattern":"(hsm1\\.medium)" + "max":32, + "pattern":"((p|)hsm[0-9][a-z.]*\\.[a-zA-Z]+)" }, "Hsms":{ "type":"list", @@ -1021,6 +1155,32 @@ "max":32, "min":7 }, + "PutResourcePolicyRequest":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"CloudHsmArn", + "documentation":"

Amazon Resource Name (ARN) of the resource to which you want to attach a policy.

" + }, + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"

The policy you want to associate with a resource.

For an example policy, see Working with shared backups in the CloudHSM User Guide

" + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"CloudHsmArn", + "documentation":"

Amazon Resource Name (ARN) of the resource to which a policy is attached.

" + }, + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"

The policy attached to a resource.

" + } + } + }, "Region":{ "type":"string", "pattern":"[a-z]{2}(-(gov))?-(east|west|north|south|central){1,2}-\\d" @@ -1029,6 +1189,11 @@ "type":"string", "pattern":"(?:cluster|backup)-[2-7a-zA-Z]{11,16}" }, + "ResourcePolicy":{ + "type":"string", + "max":20000, + "min":1 + }, "RestoreBackupRequest":{ "type":"structure", "required":["BackupId"], @@ -1165,5 +1330,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

For more information about AWS CloudHSM, see AWS CloudHSM and the AWS CloudHSM User Guide.

" + "documentation":"

For more information about CloudHSM, see CloudHSM and the CloudHSM User Guide.

" } diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 332d2b0640..10db39af11 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"cloudtrail", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CloudTrail", "serviceFullName":"AWS CloudTrail", "serviceId":"CloudTrail", "signatureVersion":"v4", "targetPrefix":"com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101", - "uid":"cloudtrail-2013-11-01" + "uid":"cloudtrail-2013-11-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTags":{ @@ -746,7 +748,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

", + "documentation":"

Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

", "idempotent":true }, "PutInsightSelectors":{ @@ -921,7 +923,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations.

When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.

When you retry an import, the ImportID parameter is required.

If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.

" + "documentation":"

Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations for copying trail events in the CloudTrail User Guide.

When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.

When you retry an import, the ImportID parameter is required.

If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.

" }, "StartLogging":{ "name":"StartLogging", @@ -1230,7 +1232,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource.

For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

  • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

  • eventSource - For filtering management events only. This can be set to NotEquals kms.amazonaws.com or NotEquals rdsdata.amazonaws.com.

  • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

  • eventCategory - This is required and must be set to Equals.

    • For CloudTrail management events, the value must be Management.

    • For CloudTrail data events, the value must be Data.

    The following are used only for event data stores:

    • For CloudTrail Insights events, the value must be Insight.

    • For Config configuration items, the value must be ConfigurationItem.

    • For Audit Manager evidence, the value must be Evidence.

    • For non-Amazon Web Services events, the value must be ActivityAuditLog.

  • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    • AWS::AppConfig::Configuration

    • AWS::B2BI::Transformer

    • AWS::Bedrock::AgentAlias

    • AWS::Bedrock::KnowledgeBase

    • AWS::Cassandra::Table

    • AWS::CloudFront::KeyValueStore

    • AWS::CloudTrail::Channel

    • AWS::CodeWhisperer::Customization

    • AWS::CodeWhisperer::Profile

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::EMRWAL::Workspace

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GreengrassV2::ComponentVersion

    • AWS::GreengrassV2::Deployment

    • AWS::GuardDuty::Detector

    • AWS::IoT::Certificate

    • AWS::IoT::Thing

    • AWS::IoTSiteWise::Asset

    • AWS::IoTSiteWise::TimeSeries

    • AWS::IoTTwinMaker::Entity

    • AWS::IoTTwinMaker::Workspace

    • AWS::KendraRanking::ExecutionPlan

    • AWS::KinesisVideo::Stream

    • AWS::ManagedBlockchain::Network

    • AWS::ManagedBlockchain::Node

    • AWS::MedicalImaging::Datastore

    • AWS::NeptuneGraph::Graph

    • AWS::PCAConnectorAD::Connector

    • AWS::QBusiness::Application

    • AWS::QBusiness::DataSource

    • AWS::QBusiness::Index

    • AWS::QBusiness::WebExperience

    • AWS::RDS::DBCluster

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    • AWS::SageMaker::Endpoint

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::ServiceDiscovery::Namespace

    • AWS::ServiceDiscovery::Service

    • AWS::SCN::Instance

    • AWS::SNS::PlatformEndpoint

    • AWS::SNS::Topic

    • AWS::SWF::Domain

    • AWS::SQS::Queue

    • AWS::SSMMessages::ControlChannel

    • AWS::ThinClient::Device

    • AWS::ThinClient::Environment

    • AWS::Timestream::Database

    • AWS::Timestream::Table

    • AWS::VerifiedPermissions::PolicyStore

    You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

  • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

    The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

    • arn:<partition>:s3:::<bucket_name>/

    • arn:<partition>:s3:::<bucket_name>/<object_path>/

    When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

    When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

    When resources.type equals AWS::AppConfig::Configuration, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:appconfig:<region>:<account_ID>:application/<application_ID>/environment/<environment_ID>/configuration/<configuration_profile_ID>

    When resources.type equals AWS::B2BI::Transformer, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:b2bi:<region>:<account_ID>:transformer/<transformer_ID>

    When resources.type equals AWS::Bedrock::AgentAlias, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:bedrock:<region>:<account_ID>:agent-alias/<agent_ID>/<alias_ID>

    When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:bedrock:<region>:<account_ID>:knowledge-base/<knowledge_base_ID>

    When resources.type equals AWS::Cassandra::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cassandra:<region>:<account_ID>:/keyspace/<keyspace_name>/table/<table_name>

    When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudfront:<region>:<account_ID>:key-value-store/<KVS_name>

    When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

    When resources.type equals AWS::CodeWhisperer::Customization, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:customization/<customization_ID>

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

    When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

    When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

    When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:emrwal:<region>:<account_ID>:workspace/<workspace_name>

    When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

    When resources.type equals AWS::GreengrassV2::ComponentVersion, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:greengrass:<region>:<account_ID>:components/<component_name>

    When resources.type equals AWS::GreengrassV2::Deployment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:greengrass:<region>:<account_ID>:deployments/<deployment_ID>

    When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

    When resources.type equals AWS::IoT::Certificate, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iot:<region>:<account_ID>:cert/<certificate_ID>

    When resources.type equals AWS::IoT::Thing, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iot:<region>:<account_ID>:thing/<thing_ID>

    When resources.type equals AWS::IoTSiteWise::Asset, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iotsitewise:<region>:<account_ID>:asset/<asset_ID>

    When resources.type equals AWS::IoTSiteWise::TimeSeries, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iotsitewise:<region>:<account_ID>:timeseries/<timeseries_ID>

    When resources.type equals AWS::IoTTwinMaker::Entity, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>/entity/<entity_ID>

    When resources.type equals AWS::IoTTwinMaker::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

    When resources.type equals AWS::KinesisVideo::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kinesisvideo:<region>:<account_ID>:stream/<stream_name>/<creation_time>

    When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:::networks/<network_name>

    When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

    When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:medical-imaging:<region>:<account_ID>:datastore/<data_store_ID>

    When resources.type equals AWS::NeptuneGraph::Graph, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:neptune-graph:<region>:<account_ID>:graph/<graph_ID>

    When resources.type equals AWS::PCAConnectorAD::Connector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:pca-connector-ad:<region>:<account_ID>:connector/<connector_ID>

    When resources.type equals AWS::QBusiness::Application, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>

    When resources.type equals AWS::QBusiness::DataSource, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>/data-source/<datasource_ID>

    When resources.type equals AWS::QBusiness::Index, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>

    When resources.type equals AWS::QBusiness::WebExperience, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/web-experience/<web_experience_ID>

    When resources.type equals AWS::RDS::DBCluster, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:rds:<region>:<account_ID>:cluster/<cluster_name>

    When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

    When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

    When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

    When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:endpoint/<endpoint_name>

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

    When resources.type equals AWS::SCN::Instance, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:scn:<region>:<account_ID>:instance/<instance_ID>

    When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:servicediscovery:<region>:<account_ID>:namespace/<namespace_ID>

    When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:servicediscovery:<region>:<account_ID>:service/<service_ID>

    When resources.type equals AWS::SNS::PlatformEndpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sns:<region>:<account_ID>:endpoint/<endpoint_type>/<endpoint_name>/<endpoint_ID>

    When resources.type equals AWS::SNS::Topic, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sns:<region>:<account_ID>:<topic_name>

    When resources.type equals AWS::SWF::Domain, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:swf:<region>:<account_ID>:domain/<domain_name>

    When resources.type equals AWS::SQS::Queue, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sqs:<region>:<account_ID>:<queue_name>

    When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ssmmessages:<region>:<account_ID>:control-channel/<channel_ID>

    When resources.type equals AWS::ThinClient::Device, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:thinclient:<region>:<account_ID>:device/<device_ID>

    When resources.type equals AWS::ThinClient::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:thinclient:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Timestream::Database, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>

    When resources.type equals AWS::Timestream::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>/table/<table_name>

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:verifiedpermissions:<region>:<account_ID>:policy-store/<policy_store_UUID>

" + "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource.

For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

  • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

  • eventSource - For filtering management events only. This can be set to NotEquals kms.amazonaws.com or NotEquals rdsdata.amazonaws.com.

  • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

  • eventCategory - This is required and must be set to Equals.

    • For CloudTrail management events, the value must be Management.

    • For CloudTrail data events, the value must be Data.

    The following are used only for event data stores:

    • For CloudTrail Insights events, the value must be Insight.

    • For Config configuration items, the value must be ConfigurationItem.

    • For Audit Manager evidence, the value must be Evidence.

    • For non-Amazon Web Services events, the value must be ActivityAuditLog.

  • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    • AWS::AppConfig::Configuration

    • AWS::B2BI::Transformer

    • AWS::Bedrock::AgentAlias

    • AWS::Bedrock::KnowledgeBase

    • AWS::Cassandra::Table

    • AWS::CloudFront::KeyValueStore

    • AWS::CloudTrail::Channel

    • AWS::CodeWhisperer::Customization

    • AWS::CodeWhisperer::Profile

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::EMRWAL::Workspace

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GreengrassV2::ComponentVersion

    • AWS::GreengrassV2::Deployment

    • AWS::GuardDuty::Detector

    • AWS::IoT::Certificate

    • AWS::IoT::Thing

    • AWS::IoTSiteWise::Asset

    • AWS::IoTSiteWise::TimeSeries

    • AWS::IoTTwinMaker::Entity

    • AWS::IoTTwinMaker::Workspace

    • AWS::KendraRanking::ExecutionPlan

    • AWS::KinesisVideo::Stream

    • AWS::ManagedBlockchain::Network

    • AWS::ManagedBlockchain::Node

    • AWS::MedicalImaging::Datastore

    • AWS::NeptuneGraph::Graph

    • AWS::PCAConnectorAD::Connector

    • AWS::QApps:QApp

    • AWS::QBusiness::Application

    • AWS::QBusiness::DataSource

    • AWS::QBusiness::Index

    • AWS::QBusiness::WebExperience

    • AWS::RDS::DBCluster

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    • AWS::SageMaker::Endpoint

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::ServiceDiscovery::Namespace

    • AWS::ServiceDiscovery::Service

    • AWS::SCN::Instance

    • AWS::SNS::PlatformEndpoint

    • AWS::SNS::Topic

    • AWS::SQS::Queue

    • AWS::SSM::ManagedNode

    • AWS::SSMMessages::ControlChannel

    • AWS::SWF::Domain

    • AWS::ThinClient::Device

    • AWS::ThinClient::Environment

    • AWS::Timestream::Database

    • AWS::Timestream::Table

    • AWS::VerifiedPermissions::PolicyStore

    • AWS::XRay::Trace

    You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

  • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type.

    You can't use the resources.ARN field to filter resource types that do not have ARNs.

    The resources.ARN field can be set to one of the following.

    If resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

    The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

    • arn:<partition>:s3:::<bucket_name>/

    • arn:<partition>:s3:::<bucket_name>/<object_path>/

    When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

    When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

    When resources.type equals AWS::AppConfig::Configuration, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:appconfig:<region>:<account_ID>:application/<application_ID>/environment/<environment_ID>/configuration/<configuration_profile_ID>

    When resources.type equals AWS::B2BI::Transformer, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:b2bi:<region>:<account_ID>:transformer/<transformer_ID>

    When resources.type equals AWS::Bedrock::AgentAlias, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:bedrock:<region>:<account_ID>:agent-alias/<agent_ID>/<alias_ID>

    When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:bedrock:<region>:<account_ID>:knowledge-base/<knowledge_base_ID>

    When resources.type equals AWS::Cassandra::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cassandra:<region>:<account_ID>:/keyspace/<keyspace_name>/table/<table_name>

    When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudfront:<region>:<account_ID>:key-value-store/<KVS_name>

    When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

    When resources.type equals AWS::CodeWhisperer::Customization, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:customization/<customization_ID>

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

    When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

    When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

    When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:emrwal:<region>:<account_ID>:workspace/<workspace_name>

    When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

    When resources.type equals AWS::GreengrassV2::ComponentVersion, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:greengrass:<region>:<account_ID>:components/<component_name>

    When resources.type equals AWS::GreengrassV2::Deployment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:greengrass:<region>:<account_ID>:deployments/<deployment_ID>

    When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

    When resources.type equals AWS::IoT::Certificate, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iot:<region>:<account_ID>:cert/<certificate_ID>

    When resources.type equals AWS::IoT::Thing, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iot:<region>:<account_ID>:thing/<thing_ID>

    When resources.type equals AWS::IoTSiteWise::Asset, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iotsitewise:<region>:<account_ID>:asset/<asset_ID>

    When resources.type equals AWS::IoTSiteWise::TimeSeries, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iotsitewise:<region>:<account_ID>:timeseries/<timeseries_ID>

    When resources.type equals AWS::IoTTwinMaker::Entity, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>/entity/<entity_ID>

    When resources.type equals AWS::IoTTwinMaker::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

    When resources.type equals AWS::KinesisVideo::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kinesisvideo:<region>:<account_ID>:stream/<stream_name>/<creation_time>

    When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:::networks/<network_name>

    When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

    When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:medical-imaging:<region>:<account_ID>:datastore/<data_store_ID>

    When resources.type equals AWS::NeptuneGraph::Graph, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:neptune-graph:<region>:<account_ID>:graph/<graph_ID>

    When resources.type equals AWS::PCAConnectorAD::Connector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:pca-connector-ad:<region>:<account_ID>:connector/<connector_ID>

    When resources.type equals AWS::QApps:QApp, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qapps:<region>:<account_ID>:application/<application_UUID>/qapp/<qapp_UUID>

    When resources.type equals AWS::QBusiness::Application, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>

    When resources.type equals AWS::QBusiness::DataSource, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>/data-source/<datasource_ID>

    When resources.type equals AWS::QBusiness::Index, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>

    When resources.type equals AWS::QBusiness::WebExperience, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/web-experience/<web_experience_ID>

    When resources.type equals AWS::RDS::DBCluster, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:rds:<region>:<account_ID>:cluster/<cluster_name>

    When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

    When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

    When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

    When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:endpoint/<endpoint_name>

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

    When resources.type equals AWS::SCN::Instance, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:scn:<region>:<account_ID>:instance/<instance_ID>

    When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:servicediscovery:<region>:<account_ID>:namespace/<namespace_ID>

    When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:servicediscovery:<region>:<account_ID>:service/<service_ID>

    When resources.type equals AWS::SNS::PlatformEndpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sns:<region>:<account_ID>:endpoint/<endpoint_type>/<endpoint_name>/<endpoint_ID>

    When resources.type equals AWS::SNS::Topic, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sns:<region>:<account_ID>:<topic_name>

    When resources.type equals AWS::SQS::Queue, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sqs:<region>:<account_ID>:<queue_name>

    When resources.type equals AWS::SSM::ManagedNode, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats:

    • arn:<partition>:ssm:<region>:<account_ID>:managed-instance/<instance_ID>

    • arn:<partition>:ec2:<region>:<account_ID>:instance/<instance_ID>

    When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ssmmessages:<region>:<account_ID>:control-channel/<channel_ID>

    When resources.type equals AWS::SWF::Domain, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:swf:<region>:<account_ID>:domain/<domain_name>

    When resources.type equals AWS::ThinClient::Device, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:thinclient:<region>:<account_ID>:device/<device_ID>

    When resources.type equals AWS::ThinClient::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:thinclient:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Timestream::Database, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>

    When resources.type equals AWS::Timestream::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>/table/<table_name>

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:verifiedpermissions:<region>:<account_ID>:policy-store/<policy_store_UUID>

" }, "Equals":{ "shape":"Operator", @@ -1389,7 +1391,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see Enabling Trusted Access with Other Amazon Web Services Services and Prepare For Creating a Trail For Your Organization.

", + "documentation":"

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.

", "exception":true }, "CloudTrailInvalidClientTokenIdException":{ @@ -1475,7 +1477,7 @@ }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", - "documentation":"

The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.

For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include non-Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" + "documentation":"

The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.

For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include events from outside Amazon Web Services in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" }, "MultiRegionEnabled":{ "shape":"Boolean", @@ -1575,11 +1577,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. For information about bucket naming rules, see Bucket naming rules in the Amazon Simple Storage Service User Guide.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -1630,7 +1632,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" }, "SnsTopicName":{ "shape":"String", @@ -1685,10 +1687,10 @@ }, "Values":{ "shape":"DataResourceValues", - "documentation":"

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.

  • To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3.

    This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.

  • To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket.

  • To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix.

  • To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda.

    This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.

  • To log data events for a specific Lambda function, specify the function ARN.

    Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

  • To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.

" + "documentation":"

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.

  • To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3.

    This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.

  • To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket.

  • To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix.

  • To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda.

    This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.

  • To log data events for a specific Lambda function, specify the function ARN.

    Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

  • To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.

" } }, - "documentation":"

The Amazon S3 buckets, Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.

" + "documentation":"

Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

Configure the DataResource to specify the resource type and resource ARNs for which you want to log data events.

You can specify the following resource types in your event selectors for your trail:

  • AWS::DynamoDB::Table

  • AWS::Lambda::Function

  • AWS::S3::Object

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.

" }, "DataResourceValues":{ "type":"list", @@ -2086,7 +2088,7 @@ "deprecatedMessage":"UpdatedTimestamp is no longer returned by ListEventDataStores" } }, - "documentation":"

A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.

" + "documentation":"

A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.

" }, "EventDataStoreARNInvalidException":{ "type":"structure", @@ -2336,6 +2338,10 @@ "FederationRoleArn":{ "shape":"FederationRoleArn", "documentation":"

If Lake query federation is enabled, provides the ARN of the federation role used to access the resources for the federated event data store.

" + }, + "PartitionKeys":{ + "shape":"PartitionKeyList", + "documentation":"

The partition keys for the event data store. To improve query performance and efficiency, CloudTrail Lake organizes event data into partitions based on values derived from partition keys.

" } } }, @@ -2559,7 +2565,7 @@ }, "LatestDeliveryError":{ "shape":"String", - "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.

" + "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

" }, "LatestNotificationError":{ "shape":"String", @@ -2595,7 +2601,7 @@ }, "LatestDigestDeliveryError":{ "shape":"String", - "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.

" + "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

" }, "LatestDeliveryAttemptTime":{ "shape":"String", @@ -3564,7 +3570,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Create an event data store.

", + "documentation":"

This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.

", "exception":true }, "OperationNotPermittedException":{ @@ -3605,6 +3611,41 @@ "min":4, "pattern":".*" }, + "PartitionKey":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"PartitionKeyName", + "documentation":"

The name of the partition key.

" + }, + "Type":{ + "shape":"PartitionKeyType", + "documentation":"

The data type of the partition key. For example, bigint or string.

" + } + }, + "documentation":"

Contains information about a partition key for an event data store.

" + }, + "PartitionKeyList":{ + "type":"list", + "member":{"shape":"PartitionKey"}, + "max":2 + }, + "PartitionKeyName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "PartitionKeyType":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "PublicKey":{ "type":"structure", "members":{ @@ -4395,11 +4436,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket naming rules.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -4673,11 +4714,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket naming rules.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -4727,7 +4768,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your IAM Log Files.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your IAM Log Files.

" }, "SnsTopicName":{ "shape":"String", diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index 29cdb7da8c..443cea7170 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -4,12 +4,14 @@ "apiVersion":"2010-08-01", "endpointPrefix":"monitoring", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"CloudWatch", "serviceFullName":"Amazon CloudWatch", "serviceId":"CloudWatch", "signatureVersion":"v4", "uid":"monitoring-2010-08-01", - "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/" + "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "DeleteAlarms":{ diff --git a/botocore/data/codeartifact/2018-09-22/service-2.json b/botocore/data/codeartifact/2018-09-22/service-2.json index 10df5d9dc9..47941f71f9 100644 --- a/botocore/data/codeartifact/2018-09-22/service-2.json +++ b/botocore/data/codeartifact/2018-09-22/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"codeartifact", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"CodeArtifact", "serviceId":"codeartifact", "signatureVersion":"v4", "signingName":"codeartifact", - "uid":"codeartifact-2018-09-22" + "uid":"codeartifact-2018-09-22", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateExternalConnection":{ @@ -457,7 +459,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

  • generic

  • maven

  • npm

  • nuget

  • pypi

  • ruby

  • swift

" + "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

  • cargo

  • generic

  • maven

  • npm

  • nuget

  • pypi

  • ruby

  • swift

" }, "GetRepositoryPermissionsPolicy":{ "name":"GetRepositoryPermissionsPolicy", @@ -963,7 +965,7 @@ }, "externalConnection":{ "shape":"ExternalConnectionName", - "documentation":"

The name of the external connection to add to the repository. The following values are supported:

  • public:npmjs - for the npm public repository.

  • public:nuget-org - for the NuGet Gallery.

  • public:pypi - for the Python Package Index.

  • public:maven-central - for Maven Central.

  • public:maven-googleandroid - for the Google Android repository.

  • public:maven-gradleplugins - for the Gradle plugins repository.

  • public:maven-commonsware - for the CommonsWare Android repository.

  • public:maven-clojars - for the Clojars repository.

", + "documentation":"

The name of the external connection to add to the repository. The following values are supported:

  • public:npmjs - for the npm public repository.

  • public:nuget-org - for the NuGet Gallery.

  • public:pypi - for the Python Package Index.

  • public:maven-central - for Maven Central.

  • public:maven-googleandroid - for the Google Android repository.

  • public:maven-gradleplugins - for the Gradle plugins repository.

  • public:maven-commonsware - for the CommonsWare Android repository.

  • public:maven-clojars - for the Clojars repository.

  • public:ruby-gems-org - for RubyGems.org.

  • public:crates-io - for Crates.io.

", "location":"querystring", "locationName":"external-connection" } @@ -987,7 +989,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the associated package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the associated package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -1070,7 +1072,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example:

The namespace is required when copying package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example:

The namespace is required when copying package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1365,7 +1367,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1419,7 +1421,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1625,7 +1627,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the requested package. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the requested package. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1683,7 +1685,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the requested package version. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the requested package version. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1831,7 +1833,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package versions to be disposed. The package component that specifies its namespace depends on its type. For example:

The namespace is required when disposing package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package versions to be disposed. The package component that specifies its namespace depends on its type. For example:

The namespace is required when disposing package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2010,7 +2012,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example:

The namespace is required when getting associated package groups from packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example:

The namespace is required when getting associated package groups from packages of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2137,7 +2139,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting assets from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting assets from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2231,7 +2233,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting the readme from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting the readme from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2258,7 +2260,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2650,7 +2652,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting assets from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:

The namespace is required when requesting assets from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2689,7 +2691,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2749,7 +2751,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example:

The namespace is required when listing dependencies from package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example:

The namespace is required when listing dependencies from package versions of the following formats:

  • Maven

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm package version is its scope.

  • Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2782,7 +2784,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example:

The namespace is required when listing dependencies from package versions of the following formats:

  • Maven

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm package version is its scope.

  • Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2846,7 +2848,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:

The namespace is required when deleting package versions of the following formats:

  • Maven

  • Swift

  • generic

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2901,7 +2903,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2955,7 +2957,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior.

Each package format uses namespace as follows:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior.

Each package format uses namespace as follows:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -3190,7 +3192,7 @@ "members":{ "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -3220,7 +3222,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "name":{ "shape":"PackageName", @@ -3242,7 +3244,8 @@ "nuget", "generic", "ruby", - "swift" + "swift", + "cargo" ] }, "PackageGroupAllowedRepository":{ @@ -3510,7 +3513,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -3542,7 +3545,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" + "documentation":"

The namespace of the package version. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

" }, "packageName":{ "shape":"PackageName", @@ -3905,7 +3908,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -4421,7 +4424,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", + "documentation":"

The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package version is its groupId.

  • The namespace of an npm or Swift package version is its scope.

  • The namespace of a generic package is its namespace.

  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -4560,5 +4563,5 @@ ] } }, - "documentation":"

CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

CodeArtifact concepts

  • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools such as the npm CLI or the Maven CLI ( mvn ). For a list of supported package managers, see the CodeArtifact User Guide.

  • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS).

    Each repository is a member of a single domain and can't be moved to a different domain.

    The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

    Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

  • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, and generic package formats. For more information about the supported package formats and how to use CodeArtifact with them, see the CodeArtifact User Guide.

    In CodeArtifact, a package consists of:

    • A name (for example, webpack is the name of a popular npm package)

    • An optional namespace (for example, @types in @types/node)

    • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

    • Package-level metadata (for example, npm tags)

  • Package group: A group of packages that match a specified definition. Package groups can be used to apply configuration to multiple packages that match a defined pattern using package format, package namespace, and package name. You can use package groups to more conveniently configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing of new package versions, which protects users from malicious actions known as dependency substitution attacks.

  • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

  • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

  • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

CodeArtifact supported API operations

  • AssociateExternalConnection: Adds an existing external connection to a repository.

  • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

  • CreateDomain: Creates a domain.

  • CreatePackageGroup: Creates a package group.

  • CreateRepository: Creates a CodeArtifact repository in a domain.

  • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

  • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

  • DeletePackage: Deletes a package and all associated package versions.

  • DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group.

  • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

  • DeleteRepository: Deletes a repository.

  • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

  • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

  • DescribePackage: Returns a PackageDescription object that contains details about a package.

  • DescribePackageGroup: Returns a PackageGroup object that contains details about a package group.

  • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

  • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

  • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage.

  • DisassociateExternalConnection: Removes an existing external connection from a repository.

  • GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package.

  • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.

  • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

  • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

  • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

  • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • generic

    • maven

    • npm

    • nuget

    • pypi

    • ruby

    • swift

  • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

  • ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES.

  • ListAssociatedPackages: Returns a list of packages associated with the requested package group.

  • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

  • ListPackages: Lists the packages in a repository.

  • ListPackageGroups: Returns a list of package groups in the requested domain.

  • ListPackageVersionAssets: Lists the assets for a given package version.

  • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

  • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

  • ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method.

  • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

  • ListSubPackageGroups: Returns a list of direct children of the specified package group.

  • PublishPackageVersion: Creates a new package version containing one or more assets.

  • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

  • PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determine how new versions of the package can be added to a specific repository.

  • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

  • UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern.

  • UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group.

  • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

  • UpdateRepository: Updates the properties of a repository.

" + "documentation":"

CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

CodeArtifact concepts

  • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools such as the npm CLI or the Maven CLI ( mvn ). For a list of supported package managers, see the CodeArtifact User Guide.

  • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS).

    Each repository is a member of a single domain and can't be moved to a different domain.

    The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

    Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

  • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, Cargo, and generic package formats. For more information about the supported package formats and how to use CodeArtifact with them, see the CodeArtifact User Guide.

    In CodeArtifact, a package consists of:

    • A name (for example, webpack is the name of a popular npm package)

    • An optional namespace (for example, @types in @types/node)

    • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

    • Package-level metadata (for example, npm tags)

  • Package group: A group of packages that match a specified definition. Package groups can be used to apply configuration to multiple packages that match a defined pattern using package format, package namespace, and package name. You can use package groups to more conveniently configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing of new package versions, which protects users from malicious actions known as dependency substitution attacks.

  • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

  • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

  • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

CodeArtifact supported API operations

  • AssociateExternalConnection: Adds an existing external connection to a repository.

  • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

  • CreateDomain: Creates a domain.

  • CreatePackageGroup: Creates a package group.

  • CreateRepository: Creates a CodeArtifact repository in a domain.

  • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

  • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

  • DeletePackage: Deletes a package and all associated package versions.

  • DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group.

  • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

  • DeleteRepository: Deletes a repository.

  • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

  • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

  • DescribePackage: Returns a PackageDescription object that contains details about a package.

  • DescribePackageGroup: Returns a PackageGroup object that contains details about a package group.

  • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

  • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

  • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage.

  • DisassociateExternalConnection: Removes an existing external connection from a repository.

  • GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package.

  • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.

  • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

  • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

  • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

  • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • cargo

    • generic

    • maven

    • npm

    • nuget

    • pypi

    • ruby

    • swift

  • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

  • ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES.

  • ListAssociatedPackages: Returns a list of packages associated with the requested package group.

  • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

  • ListPackages: Lists the packages in a repository.

  • ListPackageGroups: Returns a list of package groups in the requested domain.

  • ListPackageVersionAssets: Lists the assets for a given package version.

  • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

  • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

  • ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method.

  • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

  • ListSubPackageGroups: Returns a list of direct children of the specified package group.

  • PublishPackageVersion: Creates a new package version containing one or more assets.

  • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

  • PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determines how new versions of the package can be added to a specific repository.

  • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

  • UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern.

  • UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group.

  • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

  • UpdateRepository: Updates the properties of a repository.

" } diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 18c89b27d5..4a82970858 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"codebuild", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS CodeBuild", "serviceId":"CodeBuild", "signatureVersion":"v4", "targetPrefix":"CodeBuild_20161006", - "uid":"codebuild-2016-10-06" + "uid":"codebuild-2016-10-06", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchDeleteBuilds":{ @@ -339,7 +341,7 @@ {"shape":"AccountLimitExceededException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

" + "documentation":"

Imports the source repository credentials for a CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository.

" }, "InvalidateProjectCache":{ "name":"InvalidateProjectCache", @@ -733,7 +735,8 @@ "OAUTH", "BASIC_AUTH", "PERSONAL_ACCESS_TOKEN", - "CODECONNECTIONS" + "CODECONNECTIONS", + "SECRETS_MANAGER" ] }, "BatchDeleteBuildsInput":{ @@ -1013,7 +1016,7 @@ }, "timeoutInMinutes":{ "shape":"WrapperInt", - "documentation":"

How long, in minutes, for CodeBuild to wait before timing out this build if it does not get marked as completed.

" + "documentation":"

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out this build if it does not get marked as completed.

" }, "queuedTimeoutInMinutes":{ "shape":"WrapperInt", @@ -1423,6 +1426,11 @@ }, "documentation":"

Contains summary information about a batch build group.

" }, + "BuildTimeOut":{ + "type":"integer", + "max":2160, + "min":5 + }, "Builds":{ "type":"list", "member":{"shape":"Build"} @@ -1596,7 +1604,12 @@ }, "overflowBehavior":{ "shape":"FleetOverflowBehavior", - "documentation":"

The compute fleet overflow behavior.

  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.

  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.

" + "documentation":"

The compute fleet overflow behavior.

" + }, + "vpcConfig":{"shape":"VpcConfig"}, + "fleetServiceRole":{ + "shape":"NonEmptyString", + "documentation":"

The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" }, "tags":{ "shape":"TagList", @@ -1641,7 +1654,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For GitLab: the commit ID, branch, or Git tag to use.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -1668,8 +1681,8 @@ "documentation":"

The ARN of the IAM role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" }, "timeoutInMinutes":{ - "shape":"TimeOut", - "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

" + "shape":"BuildTimeOut", + "documentation":"

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -1685,7 +1698,7 @@ }, "vpcConfig":{ "shape":"VpcConfig", - "documentation":"

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

" + "documentation":"

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

If you're using compute fleets during project creation, do not provide vpcConfig.

" }, "badgeEnabled":{ "shape":"WrapperBoolean", @@ -1772,6 +1785,14 @@ "buildType":{ "shape":"WebhookBuildType", "documentation":"

Specifies the type of build this webhook will trigger.

" + }, + "manualCreation":{ + "shape":"WrapperBoolean", + "documentation":"

If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub.

manualCreation is only available for GitHub webhooks.

" + }, + "scopeConfiguration":{ + "shape":"ScopeConfiguration", + "documentation":"

The scope configuration for global or organization webhooks.

Global or organization webhooks are only available for GitHub and Github Enterprise webhooks.

" } } }, @@ -2203,7 +2224,12 @@ }, "overflowBehavior":{ "shape":"FleetOverflowBehavior", - "documentation":"

The compute fleet overflow behavior.

  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.

  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.

" + "documentation":"

The compute fleet overflow behavior.

" + }, + "vpcConfig":{"shape":"VpcConfig"}, + "fleetServiceRole":{ + "shape":"NonEmptyString", + "documentation":"

The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" }, "tags":{ "shape":"TagList", @@ -2226,7 +2252,8 @@ "type":"string", "enum":[ "CREATE_FAILED", - "UPDATE_FAILED" + "UPDATE_FAILED", + "ACTION_REQUIRED" ] }, "FleetName":{ @@ -2398,7 +2425,7 @@ }, "token":{ "shape":"SensitiveNonEmptyString", - "documentation":"

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password.

" + "documentation":"

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the authType CODECONNECTIONS, this is the connectionArn. For the authType SECRETS_MANAGER, this is the secretArn.

" }, "serverType":{ "shape":"ServerType", @@ -2406,7 +2433,7 @@ }, "authType":{ "shape":"AuthType", - "documentation":"

The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console.

" + "documentation":"

The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console.

" }, "shouldOverwrite":{ "shape":"WrapperBoolean", @@ -3012,7 +3039,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For GitLab: the commit ID, branch, or Git tag to use.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -3039,8 +3066,8 @@ "documentation":"

The ARN of the IAM role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" }, "timeoutInMinutes":{ - "shape":"TimeOut", - "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

" + "shape":"BuildTimeOut", + "documentation":"

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -3315,9 +3342,9 @@ }, "ProjectName":{ "type":"string", - "max":255, + "max":150, "min":2, - "pattern":"[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}" + "pattern":"[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}" }, "ProjectNames":{ "type":"list", @@ -3365,7 +3392,7 @@ }, "auth":{ "shape":"SourceAuth", - "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

" + "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

" }, "reportBuildStatus":{ "shape":"WrapperBoolean", @@ -3399,7 +3426,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For GitLab: the commit ID, branch, or Git tag to use.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" } }, "documentation":"

A source identifier and its corresponding version.

" @@ -3894,6 +3921,28 @@ }, "documentation":"

The scaling configuration output of a compute fleet.

" }, + "ScopeConfiguration":{ + "type":"structure", + "required":[ + "name", + "scope" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of either the enterprise or organization that will send webhook events to CodeBuild, depending on if the webhook is a global or organization webhook respectively.

" + }, + "domain":{ + "shape":"String", + "documentation":"

The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE.

" + }, + "scope":{ + "shape":"WebhookScopeType", + "documentation":"

The type of scope for a GitHub webhook.

" + } + }, + "documentation":"

Contains configuration information about the scope for a webhook.

" + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -3938,20 +3987,21 @@ "members":{ "type":{ "shape":"SourceAuthType", - "documentation":"

The authorization type to use. Valid options are OAUTH or CODECONNECTIONS.

" + "documentation":"

The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.

" }, "resource":{ "shape":"String", "documentation":"

The resource value that applies to the specified authorization type.

" } }, - "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

" + "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

" }, "SourceAuthType":{ "type":"string", "enum":[ "OAUTH", - "CODECONNECTIONS" + "CODECONNECTIONS", + "SECRETS_MANAGER" ] }, "SourceCredentialsInfo":{ @@ -3967,11 +4017,11 @@ }, "authType":{ "shape":"AuthType", - "documentation":"

The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.

" + "documentation":"

The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.

" }, "resource":{ "shape":"String", - "documentation":"

The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS.

" + "documentation":"

The connection ARN if your authType is CODECONNECTIONS or SECRETS_MANAGER.

" } }, "documentation":"

Information about the credentials for a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository.

" @@ -4087,7 +4137,7 @@ "documentation":"

Enable this flag to override privileged mode in the batch build project.

" }, "buildTimeoutInMinutesOverride":{ - "shape":"TimeOut", + "shape":"BuildTimeOut", "documentation":"

Overrides the build timeout specified in the batch build project.

" }, "queuedTimeoutInMinutesOverride":{ @@ -4151,7 +4201,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

GitLab

The commit ID, branch, or Git tag to use.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" }, "artifactsOverride":{ "shape":"ProjectArtifacts", @@ -4175,7 +4225,7 @@ }, "sourceAuthOverride":{ "shape":"SourceAuth", - "documentation":"

An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub.

" + "documentation":"

An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket, GitHub, GitLab, or GitLab Self Managed.

" }, "gitCloneDepthOverride":{ "shape":"GitCloneDepth", @@ -4230,8 +4280,8 @@ "documentation":"

Enable this flag to override privileged mode in the build project.

" }, "timeoutInMinutesOverride":{ - "shape":"TimeOut", - "documentation":"

The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project.

" + "shape":"BuildTimeOut", + "documentation":"

The number of build timeout minutes, from 5 to 2160 (36 hours), that overrides, for this build only, the latest setting already defined in the build project.

" }, "queuedTimeoutInMinutesOverride":{ "shape":"TimeOut", @@ -4477,7 +4527,12 @@ }, "overflowBehavior":{ "shape":"FleetOverflowBehavior", - "documentation":"

The compute fleet overflow behavior.

  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.

  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.

" + "documentation":"

The compute fleet overflow behavior.

" + }, + "vpcConfig":{"shape":"VpcConfig"}, + "fleetServiceRole":{ + "shape":"NonEmptyString", + "documentation":"

The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" }, "tags":{ "shape":"TagList", @@ -4516,7 +4571,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For GitLab: the commit ID, branch, or Git tag to use.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -4543,8 +4598,8 @@ "documentation":"

The replacement ARN of the IAM role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" }, "timeoutInMinutes":{ - "shape":"TimeOut", - "documentation":"

The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed.

" + "shape":"BuildTimeOut", + "documentation":"

The replacement value in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -4735,9 +4790,17 @@ "shape":"WebhookBuildType", "documentation":"

Specifies the type of build this webhook will trigger.

" }, + "manualCreation":{ + "shape":"WrapperBoolean", + "documentation":"

If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub.

manualCreation is only available for GitHub webhooks.

" + }, "lastModifiedSecret":{ "shape":"Timestamp", "documentation":"

A timestamp that indicates the last time a repository's secret token was modified.

" + }, + "scopeConfiguration":{ + "shape":"ScopeConfiguration", + "documentation":"

The scope configuration for global or organization webhooks.

Global or organization webhooks are only available for GitHub and GitHub Enterprise webhooks.

" } }, "documentation":"

Information about a webhook that connects repository events to a build project in CodeBuild.

" @@ -4758,7 +4821,7 @@ "members":{ "type":{ "shape":"WebhookFilterType", - "documentation":"

The type of webhook filter. There are nine webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, and WORKFLOW_NAME.

  • EVENT

    • A webhook event triggers a build when the provided pattern matches one of nine event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events.

      The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. The RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED work with GitHub only.

  • ACTOR_ACCOUNT_ID

    • A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.

  • HEAD_REF

    • A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.

      Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

  • BASE_REF

    • A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name.

      Works with pull request events only.

  • FILE_PATH

    • A webhook triggers a build when the path of a changed file matches the regular expression pattern.

      Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

  • COMMIT_MESSAGE

    • A webhook triggers a build when the head commit message matches the regular expression pattern.

      Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

  • TAG_NAME

    • A webhook triggers a build when the tag name of the release matches the regular expression pattern.

      Works with RELEASED and PRERELEASED events only.

  • RELEASE_NAME

    • A webhook triggers a build when the release name matches the regular expression pattern.

      Works with RELEASED and PRERELEASED events only.

  • WORKFLOW_NAME

    • A webhook triggers a build when the workflow name matches the regular expression pattern.

      Works with WORKFLOW_JOB_QUEUED events only.

" + "documentation":"

The type of webhook filter. There are ten webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, REPOSITORY_NAME, and WORKFLOW_NAME.

  • EVENT

    • A webhook event triggers a build when the provided pattern matches one of nine event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events.

      Types PULL_REQUEST_REOPENED and WORKFLOW_JOB_QUEUED work with GitHub and GitHub Enterprise only. Types RELEASED and PRERELEASED work with GitHub only.

  • ACTOR_ACCOUNT_ID

    • A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.

  • HEAD_REF

    • A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.

      Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

  • BASE_REF

    • A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name.

      Works with pull request events only.

  • FILE_PATH

    • A webhook triggers a build when the path of a changed file matches the regular expression pattern.

      Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

  • COMMIT_MESSAGE

    • A webhook triggers a build when the head commit message matches the regular expression pattern.

      Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

  • TAG_NAME

    • A webhook triggers a build when the tag name of the release matches the regular expression pattern.

      Works with RELEASED and PRERELEASED events only.

  • RELEASE_NAME

    • A webhook triggers a build when the release name matches the regular expression pattern.

      Works with RELEASED and PRERELEASED events only.

  • REPOSITORY_NAME

    • A webhook triggers a build when the repository name matches the regular expression pattern.

      Works with GitHub global or organization webhooks only.

  • WORKFLOW_NAME

    • A webhook triggers a build when the workflow name matches the regular expression pattern.

      Works with WORKFLOW_JOB_QUEUED events only.

" }, "pattern":{ "shape":"String", @@ -4785,6 +4848,13 @@ "RELEASE_NAME" ] }, + "WebhookScopeType":{ + "type":"string", + "enum":[ + "GITHUB_ORGANIZATION", + "GITHUB_GLOBAL" + ] + }, "WrapperBoolean":{"type":"boolean"}, "WrapperDouble":{"type":"double"}, "WrapperInt":{"type":"integer"}, diff --git a/botocore/data/codecommit/2015-04-13/service-2.json b/botocore/data/codecommit/2015-04-13/service-2.json index 58ffe2545d..568f9f6bf5 100644 --- a/botocore/data/codecommit/2015-04-13/service-2.json +++ b/botocore/data/codecommit/2015-04-13/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"codecommit", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CodeCommit", "serviceFullName":"AWS CodeCommit", "serviceId":"CodeCommit", "signatureVersion":"v4", "targetPrefix":"CodeCommit_20150413", - "uid":"codecommit-2015-04-13" + "uid":"codecommit-2015-04-13", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateApprovalRuleTemplateWithRepository":{ @@ -327,6 +329,7 @@ {"shape":"InvalidRepositoryNameException"}, {"shape":"InvalidRepositoryDescriptionException"}, {"shape":"RepositoryLimitExceededException"}, + {"shape":"OperationNotAllowedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -3409,7 +3412,7 @@ }, "kmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see KeyId in the Decrypt API description in the Key Management Service API Reference.

If no key is specified, the default aws/codecommit Amazon Web Services managed key is used.

" + "documentation":"

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more information about acceptable values for kmsKeyID, see KeyId in the Decrypt API description in the Key Management Service API Reference.

If no key is specified, the default aws/codecommit Amazon Web Services managed key is used.

" } }, "documentation":"

Represents the input of a create repository operation.

" @@ -6249,6 +6252,13 @@ }, "documentation":"

Information about the type of an object in a merge operation.

" }, + "OperationNotAllowedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The requested action is not allowed.

", + "exception":true + }, "OrderEnum":{ "type":"string", "enum":[ @@ -7983,7 +7993,7 @@ }, "kmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more information about acceptable values for keyID, see KeyId in the Decrypt API description in the Key Management Service API Reference.

" + "documentation":"

The ID of the encryption key. You can view the ID of an encryption key in the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more information about acceptable values for keyID, see KeyId in the Decrypt API description in the Key Management Service API Reference.

" } } }, diff --git a/botocore/data/codedeploy/2014-10-06/service-2.json b/botocore/data/codedeploy/2014-10-06/service-2.json index 325ef83f09..becea699a1 100644 --- a/botocore/data/codedeploy/2014-10-06/service-2.json +++ b/botocore/data/codedeploy/2014-10-06/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"codedeploy", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CodeDeploy", "serviceFullName":"AWS CodeDeploy", "serviceId":"CodeDeploy", "signatureVersion":"v4", "targetPrefix":"CodeDeploy_20141006", - "uid":"codedeploy-2014-10-06" + "uid":"codedeploy-2014-10-06", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToOnPremisesInstances":{ diff --git a/botocore/data/codeguru-security/2018-05-10/endpoint-rule-set-1.json b/botocore/data/codeguru-security/2018-05-10/endpoint-rule-set-1.json index 9c6f96dac9..bea431e421 100644 --- a/botocore/data/codeguru-security/2018-05-10/endpoint-rule-set-1.json +++ b/botocore/data/codeguru-security/2018-05-10/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { 
"conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff --git a/botocore/data/codeguru-security/2018-05-10/service-2.json b/botocore/data/codeguru-security/2018-05-10/service-2.json index 23de518763..389208a83f 100644 --- a/botocore/data/codeguru-security/2018-05-10/service-2.json +++ b/botocore/data/codeguru-security/2018-05-10/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"codeguru-security", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon CodeGuru Security", "serviceId":"CodeGuru Security", "signatureVersion":"v4", @@ -27,7 +28,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - 
"documentation":"

Returns a list of all requested findings.

" + "documentation":"

Returns a list of requested findings from standard scans.

" }, "CreateScan":{ "name":"CreateScan", @@ -46,7 +47,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Use to create a scan using code uploaded to an S3 bucket.

" + "documentation":"

Use to create a scan using code uploaded to an Amazon S3 bucket.

" }, "CreateUploadUrl":{ "name":"CreateUploadUrl", @@ -63,7 +64,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Generates a pre-signed URL and request headers used to upload a code resource.

You can upload your code resource to the URL and add the request headers using any HTTP client.

" + "documentation":"

Generates a pre-signed URL, request headers used to upload a code resource, and code artifact identifier for the uploaded resource.

You can upload your code resource to the URL with the request headers using any HTTP client.

" }, "GetAccountConfiguration":{ "name":"GetAccountConfiguration", @@ -80,7 +81,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Use to get account level configuration.

" + "documentation":"

Use to get the encryption configuration for an account.

" }, "GetFindings":{ "name":"GetFindings", @@ -116,7 +117,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Returns top level metrics about an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings.

" + "documentation":"

Returns a summary of metrics for an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings.

" }, "GetScan":{ "name":"GetScan", @@ -131,6 +132,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], "documentation":"

Returns details about a scan, including whether or not a scan has completed.

" @@ -167,7 +169,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Returns a list of all the standard scans in an account. Does not return express scans.

" + "documentation":"

Returns a list of all scans in an account. Does not return EXPRESS scans.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -243,7 +245,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Use to update account-level configuration with an encryption key.

" + "documentation":"

Use to update the encryption configuration for an account.

" } }, "shapes":{ @@ -283,26 +285,26 @@ "members":{ "closedFindings":{ "shape":"FindingMetricsValuePerSeverity", - "documentation":"

The number of closed findings of each severity in an account on the specified date.

" + "documentation":"

The number of closed findings of each severity on the specified date.

" }, "date":{ "shape":"Timestamp", - "documentation":"

The date from which the finding metrics were retrieved.

" + "documentation":"

The date from which the findings metrics were retrieved.

" }, "meanTimeToClose":{ "shape":"FindingMetricsValuePerSeverity", - "documentation":"

The average time it takes to close findings of each severity in days.

" + "documentation":"

The average time in days it takes to close findings of each severity as of a specified date.

" }, "newFindings":{ "shape":"FindingMetricsValuePerSeverity", - "documentation":"

The number of new findings of each severity in account on the specified date.

" + "documentation":"

The number of new findings of each severity on the specified date.

" }, "openFindings":{ "shape":"FindingMetricsValuePerSeverity", - "documentation":"

The number of open findings of each severity in an account as of the specified date.

" + "documentation":"

The number of open findings of each severity as of the specified date.

" } }, - "documentation":"

A summary of findings metrics in an account.

" + "documentation":"

A summary of findings metrics for an account on a specified date.

" }, "AnalysisType":{ "type":"string", @@ -366,7 +368,7 @@ }, "findings":{ "shape":"Findings", - "documentation":"

A list of all requested findings.

" + "documentation":"

A list of all findings which were successfully fetched.

" } } }, @@ -465,11 +467,11 @@ }, "resourceId":{ "shape":"ResourceId", - "documentation":"

The identifier for an input resource used to create a scan.

" + "documentation":"

The identifier for the resource object to be scanned.

" }, "scanName":{ "shape":"ScanName", - "documentation":"

The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type. If not specified, it will be auto generated.

" + "documentation":"

The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type.

" }, "scanType":{ "shape":"ScanType", @@ -532,7 +534,7 @@ "members":{ "codeArtifactId":{ "shape":"Uuid", - "documentation":"

The identifier for the uploaded code resource.

" + "documentation":"

The identifier for the uploaded code resource. Pass this to CreateScan to use the uploaded resources.

" }, "requestHeaders":{ "shape":"RequestHeaderMap", @@ -540,7 +542,7 @@ }, "s3Url":{ "shape":"S3Url", - "documentation":"

A pre-signed S3 URL. You can upload the code file you want to scan and add the required requestHeaders using any HTTP client.

" + "documentation":"

A pre-signed S3 URL. You can upload the code file you want to scan with the required requestHeaders using any HTTP client.

" } } }, @@ -557,10 +559,10 @@ "members":{ "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The KMS key ARN to use for encryption. This must be provided as a header when uploading your code resource.

" + "documentation":"

The KMS key ARN that is used for encryption. If an AWS-managed key is used for encryption, returns empty.

" } }, - "documentation":"

Information about account-level configuration.

" + "documentation":"

Information about the encryption configuration for an account. Required to call UpdateAccountConfiguration.

" }, "ErrorCode":{ "type":"string", @@ -572,6 +574,10 @@ "INVALID_SCAN_NAME" ] }, + "ErrorMessage":{ + "type":"string", + "min":1 + }, "FilePath":{ "type":"structure", "members":{ @@ -623,7 +629,7 @@ }, "generatorId":{ "shape":"String", - "documentation":"

The identifier for the component that generated a finding such as AWSCodeGuruSecurity or AWSInspector.

" + "documentation":"

The identifier for the component that generated a finding such as AmazonCodeGuruSecurity.

" }, "id":{ "shape":"String", @@ -643,7 +649,7 @@ }, "severity":{ "shape":"Severity", - "documentation":"

The severity of the finding.

" + "documentation":"

The severity of the finding. Severity can be critical, high, medium, low, or informational. For information on severity levels, see Finding severity in the Amazon CodeGuru Security User Guide.

" }, "status":{ "shape":"Status", @@ -697,26 +703,26 @@ "members":{ "critical":{ "shape":"Double", - "documentation":"

The severity of the finding is critical and should be addressed immediately.

" + "documentation":"

A numeric value corresponding to a critical finding.

" }, "high":{ "shape":"Double", - "documentation":"

The severity of the finding is high and should be addressed as a near-term priority.

" + "documentation":"

A numeric value corresponding to a high severity finding.

" }, "info":{ "shape":"Double", - "documentation":"

The finding is related to quality or readability improvements and not considered actionable.

" + "documentation":"

A numeric value corresponding to an informational finding.

" }, "low":{ "shape":"Double", - "documentation":"

The severity of the finding is low and does not require action on its own.

" + "documentation":"

A numeric value corresponding to a low severity finding.

" }, "medium":{ "shape":"Double", - "documentation":"

The severity of the finding is medium and should be addressed as a mid-term priority.

" + "documentation":"

A numeric value corresponding to a medium severity finding.

" } }, - "documentation":"

The severity of the issue in the code that generated a finding.

" + "documentation":"

A numeric value corresponding to the severity of a finding, such as the number of open findings or the average time it takes to close findings of a given severity.

" }, "Findings":{ "type":"list", @@ -737,7 +743,7 @@ "members":{ "encryptionConfig":{ "shape":"EncryptionConfig", - "documentation":"

An EncryptionConfig object that contains the KMS key ARN to use for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration.

" + "documentation":"

An EncryptionConfig object that contains the KMS key ARN that is used for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration. If you do not specify a customer-managed key, returns empty.

" } } }, @@ -747,7 +753,7 @@ "members":{ "maxResults":{ "shape":"GetFindingsRequestMaxResultsInteger", - "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

", + "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 1000 results.

", "location":"querystring", "locationName":"maxResults" }, @@ -774,7 +780,7 @@ "GetFindingsRequestMaxResultsInteger":{ "type":"integer", "box":true, - "max":100, + "max":1000, "min":1 }, "GetFindingsResponse":{ @@ -796,7 +802,7 @@ "members":{ "date":{ "shape":"Timestamp", - "documentation":"

The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years since metrics data is only stored for two years. If a date outside of this range is passed, the response will be empty.

", + "documentation":"

The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years.

", "location":"querystring", "locationName":"date" } @@ -847,6 +853,10 @@ "shape":"Timestamp", "documentation":"

The time the scan was created.

" }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Details about the error that causes a scan to fail to be retrieved.

" + }, "numberOfRevisions":{ "shape":"Long", "documentation":"

The number of times a scan has been re-run on a revised resource.

" @@ -865,7 +875,7 @@ }, "scanState":{ "shape":"ScanState", - "documentation":"

The current state of the scan. Pass either InProgress, Successful, or Failed.

" + "documentation":"

The current state of the scan. Returns either InProgress, Successful, or Failed.

" }, "updatedAt":{ "shape":"Timestamp", @@ -918,13 +928,13 @@ "members":{ "endDate":{ "shape":"Timestamp", - "documentation":"

The end date of the interval which you want to retrieve metrics from.

", + "documentation":"

The end date of the interval which you want to retrieve metrics from. Round to the nearest day.

", "location":"querystring", "locationName":"endDate" }, "maxResults":{ "shape":"ListFindingsMetricsRequestMaxResultsInteger", - "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

", + "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 1000 results.

", "location":"querystring", "locationName":"maxResults" }, @@ -936,7 +946,7 @@ }, "startDate":{ "shape":"Timestamp", - "documentation":"

The start date of the interval which you want to retrieve metrics from.

", + "documentation":"

The start date of the interval which you want to retrieve metrics from. Rounds to the nearest day.

", "location":"querystring", "locationName":"startDate" } @@ -966,7 +976,7 @@ "members":{ "maxResults":{ "shape":"ListScansRequestMaxResultsInteger", - "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

", + "documentation":"

The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 100 results.

", "location":"querystring", "locationName":"maxResults" }, @@ -1003,7 +1013,7 @@ "members":{ "resourceArn":{ "shape":"ScanNameArn", - "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

", + "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan.

", "location":"uri", "locationName":"resourceArn" } @@ -1027,7 +1037,7 @@ "members":{ "categoriesWithMostFindings":{ "shape":"CategoriesWithMostFindings", - "documentation":"

A list of CategoryWithFindingNum objects for the top 5 finding categories with the most open findings in an account.

" + "documentation":"

A list of CategoryWithFindingNum objects for the top 5 finding categories with the most findings.

" }, "date":{ "shape":"Timestamp", @@ -1035,18 +1045,18 @@ }, "openFindings":{ "shape":"FindingMetricsValuePerSeverity", - "documentation":"

The number of open findings of each severity in an account.

" + "documentation":"

The number of open findings of each severity.

" }, "scansWithMostOpenCriticalFindings":{ "shape":"ScansWithMostOpenCriticalFindings", - "documentation":"

A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings in an account.

" + "documentation":"

A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings.

" }, "scansWithMostOpenFindings":{ "shape":"ScansWithMostOpenFindings", - "documentation":"

A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings in an account.

" + "documentation":"

A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings.

" } }, - "documentation":"

Information about summary metrics in an account.

" + "documentation":"

A summary of metrics for an account as of a specified date.

" }, "NextToken":{ "type":"string", @@ -1101,24 +1111,24 @@ "members":{ "id":{ "shape":"String", - "documentation":"

The identifier for the resource.

" + "documentation":"

The scanName of the scan that was run on the resource.

" }, "subResourceId":{ "shape":"String", - "documentation":"

The identifier for a section of the resource, such as an AWS Lambda layer.

" + "documentation":"

The identifier for a section of the resource.

" } }, - "documentation":"

Information about a resource, such as an Amazon S3 bucket or AWS Lambda function, that contains a finding.

" + "documentation":"

Information about a resource that contains a finding.

" }, "ResourceId":{ "type":"structure", "members":{ "codeArtifactId":{ "shape":"Uuid", - "documentation":"

The identifier for the code file uploaded to the resource where a finding was detected.

" + "documentation":"

The identifier for the code file uploaded to the resource object. Returned by CreateUploadUrl when you upload resources to be scanned.

" } }, - "documentation":"

The identifier for a resource object that contains resources where a finding was detected.

", + "documentation":"

The identifier for a resource object that contains resources to scan. Specifying a codeArtifactId is required to create a scan.

", "union":true }, "ResourceNotFoundException":{ @@ -1176,14 +1186,14 @@ "members":{ "findingNumber":{ "shape":"Integer", - "documentation":"

The number of open findings generated by a scan.

" + "documentation":"

The number of findings generated by a scan.

" }, "scanName":{ "shape":"String", "documentation":"

The name of the scan.

" } }, - "documentation":"

Information about a scan with open findings.

" + "documentation":"

Information about the number of findings generated by a scan.

" }, "ScanState":{ "type":"string", @@ -1276,7 +1286,7 @@ "members":{ "code":{ "shape":"String", - "documentation":"

The suggested code to add to your file.

" + "documentation":"

The suggested code fix. If applicable, includes code patch to replace your source code.

" }, "description":{ "shape":"String", @@ -1316,7 +1326,7 @@ "members":{ "resourceArn":{ "shape":"ScanNameArn", - "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

", + "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan.

", "location":"uri", "locationName":"resourceArn" }, @@ -1378,7 +1388,7 @@ "members":{ "resourceArn":{ "shape":"ScanNameArn", - "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

", + "documentation":"

The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan.

", "location":"uri", "locationName":"resourceArn" }, @@ -1401,7 +1411,7 @@ "members":{ "encryptionConfig":{ "shape":"EncryptionConfig", - "documentation":"

The KMS key ARN you want to use for encryption. Defaults to service-side encryption if missing.

" + "documentation":"

The customer-managed KMS key ARN you want to use for encryption. If not specified, CodeGuru Security will use an AWS-managed key for encryption. If you previously specified a customer-managed KMS key and want CodeGuru Security to use an AWS-managed key for encryption instead, pass nothing.

" } } }, @@ -1411,7 +1421,7 @@ "members":{ "encryptionConfig":{ "shape":"EncryptionConfig", - "documentation":"

An EncryptionConfig object that contains the KMS key ARN to use for encryption.

" + "documentation":"

An EncryptionConfig object that contains the KMS key ARN that is used for encryption. If you did not specify a customer-managed KMS key in the request, returns empty.

" } } }, @@ -1496,7 +1506,9 @@ }, "itemCount":{ "shape":"Integer", - "documentation":"

The number of times the vulnerability appears in your code.

" + "documentation":"

The number of times the vulnerability appears in your code.

", + "deprecated":true, + "deprecatedMessage":"This shape is not used." }, "referenceUrls":{ "shape":"ReferenceUrls", diff --git a/botocore/data/codepipeline/2015-07-09/paginators-1.json b/botocore/data/codepipeline/2015-07-09/paginators-1.json index dca90cb7d3..00b8348e02 100644 --- a/botocore/data/codepipeline/2015-07-09/paginators-1.json +++ b/botocore/data/codepipeline/2015-07-09/paginators-1.json @@ -34,6 +34,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "tags" + }, + "ListRuleExecutions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "ruleExecutionDetails" } } } diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index 4d63dc9e42..013a313a9c 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -11,7 +11,8 @@ "serviceId":"CodePipeline", "signatureVersion":"v4", "targetPrefix":"CodePipeline_20150709", - "uid":"codepipeline-2015-07-09" + "uid":"codepipeline-2015-07-09", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcknowledgeJob":{ @@ -297,7 +298,7 @@ {"shape":"PipelineNotFoundException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Gets a summary of the most recent executions for a pipeline.

" + "documentation":"

Gets a summary of the most recent executions for a pipeline.

When applying the filter for pipeline executions that have succeeded in the stage, the operation returns all executions in the current pipeline version beginning on February 1, 2024.

" }, "ListPipelines":{ "name":"ListPipelines", @@ -313,6 +314,36 @@ ], "documentation":"

Gets a summary of all of the pipelines associated with your account.

" }, + "ListRuleExecutions":{ + "name":"ListRuleExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleExecutionsInput"}, + "output":{"shape":"ListRuleExecutionsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"PipelineExecutionNotFoundException"} + ], + "documentation":"

Lists the rule executions that have occurred in a pipeline configured for conditions with rules.

" + }, + "ListRuleTypes":{ + "name":"ListRuleTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleTypesInput"}, + "output":{"shape":"ListRuleTypesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists the rules for the condition.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -343,6 +374,24 @@ ], "documentation":"

Gets a listing of all the webhooks in this Amazon Web Services Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook.

" }, + "OverrideStageCondition":{ + "name":"OverrideStageCondition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OverrideStageConditionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"}, + {"shape":"ConditionNotOverridableException"}, + {"shape":"NotLatestPipelineExecutionException"}, + {"shape":"ConcurrentPipelineExecutionsLimitExceededException"} + ], + "documentation":"

Used to override a stage condition.

" + }, "PollForJobs":{ "name":"PollForJobs", "http":{ @@ -383,7 +432,8 @@ {"shape":"PipelineNotFoundException"}, {"shape":"StageNotFoundException"}, {"shape":"ActionNotFoundException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConcurrentPipelineExecutionsLimitExceededException"} ], "documentation":"

Provides information to CodePipeline about new revisions to a source.

" }, @@ -512,7 +562,8 @@ {"shape":"PipelineNotFoundException"}, {"shape":"StageNotFoundException"}, {"shape":"StageNotRetryableException"}, - {"shape":"NotLatestPipelineExecutionException"} + {"shape":"NotLatestPipelineExecutionException"}, + {"shape":"ConcurrentPipelineExecutionsLimitExceededException"} ], "documentation":"

You can retry a stage that has failed without having to run a pipeline again from the beginning. You do this by either retrying the failed actions in a stage or by retrying all actions in the stage starting from the first action in the stage. When you retry the failed actions in a stage, all actions that are still in progress continue working, and failed actions are triggered again. When you retry a failed stage from the first action in the stage, the stage cannot have any actions in progress. Before a stage can be retried, it must either have all actions failed or some actions failed and some succeeded.

" }, @@ -1686,6 +1737,17 @@ "type":"string", "enum":["S3"] }, + "BeforeEntryConditions":{ + "type":"structure", + "required":["conditions"], + "members":{ + "conditions":{ + "shape":"ConditionList", + "documentation":"

The conditions that are configured as entry conditions.

" + } + }, + "documentation":"

The conditions for making checks for entry to a stage.

" + }, "BlockerDeclaration":{ "type":"structure", "required":[ @@ -1746,6 +1808,89 @@ "documentation":"

The pipeline has reached the limit for concurrent pipeline executions.

", "exception":true }, + "Condition":{ + "type":"structure", + "members":{ + "result":{ + "shape":"Result", + "documentation":"

The action to be done when the condition is met. For example, rolling back an execution for a failure condition.

" + }, + "rules":{ + "shape":"RuleDeclarationList", + "documentation":"

The rules that make up the condition.

" + } + }, + "documentation":"

The condition for the stage. A condition is made up of the rules and the result for the condition.

" + }, + "ConditionExecution":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ConditionExecutionStatus", + "documentation":"

The status of the run for a condition.

" + }, + "summary":{ + "shape":"ExecutionSummary", + "documentation":"

The summary of information about a run for a condition.

" + }, + "lastStatusChange":{ + "shape":"Timestamp", + "documentation":"

The last status change of the condition.

" + } + }, + "documentation":"

The run of a condition.

" + }, + "ConditionExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Failed", + "Errored", + "Succeeded", + "Cancelled", + "Abandoned", + "Overridden" + ] + }, + "ConditionList":{ + "type":"list", + "member":{"shape":"Condition"}, + "max":1, + "min":1 + }, + "ConditionNotOverridableException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Unable to override because the condition does not allow overrides.

", + "exception":true + }, + "ConditionState":{ + "type":"structure", + "members":{ + "latestExecution":{ + "shape":"ConditionExecution", + "documentation":"

The state of the latest run of the rule.

" + }, + "ruleStates":{ + "shape":"RuleStateList", + "documentation":"

The state of the rules for the condition.

" + } + }, + "documentation":"

Information about the state of the condition.

" + }, + "ConditionStateList":{ + "type":"list", + "member":{"shape":"ConditionState"} + }, + "ConditionType":{ + "type":"string", + "enum":[ + "BEFORE_ENTRY", + "ON_SUCCESS" + ] + }, "ConflictException":{ "type":"structure", "members":{ @@ -2135,6 +2280,10 @@ "result":{ "shape":"Result", "documentation":"

The specified result for when the failure conditions are met, such as rolling back the stage.

" + }, + "conditions":{ + "shape":"ConditionList", + "documentation":"

The conditions that are configured as failure conditions.

" } }, "documentation":"

The configuration that specifies the result, such as rollback, to occur upon stage failure.

" @@ -2812,7 +2961,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Action execution history is retained for up to 12 months, based on action execution start times. Default value is 100.

Detailed execution history is available for executions run on or after February 21, 2019.

" + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Action execution history is retained for up to 12 months, based on action execution start times. Default value is 100.

" }, "nextToken":{ "shape":"NextToken", @@ -2931,6 +3080,64 @@ }, "documentation":"

Represents the output of a ListPipelines action.

" }, + "ListRuleExecutionsInput":{ + "type":"structure", + "required":["pipelineName"], + "members":{ + "pipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline for which you want to get execution summary information.

" + }, + "filter":{ + "shape":"RuleExecutionFilter", + "documentation":"

Input information used to filter rule execution history.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Pipeline history is limited to the most recent 12 months, based on pipeline execution start times. Default value is 100.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token that was returned from the previous ListRuleExecutions call, which can be used to return the next set of rule executions in the list.

" + } + } + }, + "ListRuleExecutionsOutput":{ + "type":"structure", + "members":{ + "ruleExecutionDetails":{ + "shape":"RuleExecutionDetailList", + "documentation":"

Details about the output for listing rule executions.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used in the next ListRuleExecutions call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.

" + } + } + }, + "ListRuleTypesInput":{ + "type":"structure", + "members":{ + "ruleOwnerFilter":{ + "shape":"RuleOwner", + "documentation":"

The rule owner to filter on.

" + }, + "regionFilter":{ + "shape":"AWSRegionName", + "documentation":"

The rule Region to filter on.

" + } + } + }, + "ListRuleTypesOutput":{ + "type":"structure", + "required":["ruleTypes"], + "members":{ + "ruleTypes":{ + "shape":"RuleTypeList", + "documentation":"

Lists the rules that are configured for the condition.

" + } + } + }, "ListTagsForResourceInput":{ "type":"structure", "required":["resourceArn"], @@ -3120,6 +3327,33 @@ "exception":true }, "OutputVariablesValue":{"type":"string"}, + "OverrideStageConditionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "pipelineExecutionId", + "conditionType" + ], + "members":{ + "pipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline with the stage that will override the condition.

" + }, + "stageName":{ + "shape":"StageName", + "documentation":"

The name of the stage for the override.

" + }, + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The ID of the pipeline execution for the override.

" + }, + "conditionType":{ + "shape":"ConditionType", + "documentation":"

The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions.

" + } + } + }, "Percentage":{ "type":"integer", "max":100, @@ -3892,6 +4126,11 @@ "type":"list", "member":{"shape":"ResolvedPipelineVariable"} }, + "ResolvedRuleConfigurationMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "ResourceArn":{ "type":"string", "pattern":"arn:aws(-[\\w]+)*:codepipeline:.+:[0-9]{12}:.+" @@ -3905,7 +4144,10 @@ }, "Result":{ "type":"string", - "enum":["ROLLBACK"] + "enum":[ + "ROLLBACK", + "FAIL" + ] }, "RetryStageExecutionInput":{ "type":"structure", @@ -3997,6 +4239,457 @@ } } }, + "RuleCategory":{ + "type":"string", + "enum":["Rule"] + }, + "RuleConfigurationKey":{ + "type":"string", + "max":50, + "min":1 + }, + "RuleConfigurationMap":{ + "type":"map", + "key":{"shape":"RuleConfigurationKey"}, + "value":{"shape":"RuleConfigurationValue"}, + "max":200, + "min":0 + }, + "RuleConfigurationProperty":{ + "type":"structure", + "required":[ + "name", + "required", + "key", + "secret" + ], + "members":{ + "name":{ + "shape":"RuleConfigurationKey", + "documentation":"

The name of the rule configuration property.

" + }, + "required":{ + "shape":"Boolean", + "documentation":"

Whether the configuration property is a required value.

" + }, + "key":{ + "shape":"Boolean", + "documentation":"

Whether the configuration property is a key.

" + }, + "secret":{ + "shape":"Boolean", + "documentation":"

Whether the configuration property is secret.

When updating a pipeline, passing ***** without changing any other values of the action preserves the previous value of the secret.

" + }, + "queryable":{ + "shape":"Boolean", + "documentation":"

Indicates whether the property can be queried.

If you create a pipeline with a condition and rule, and that rule contains a queryable property, the value for that configuration property is subject to other restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the action configuration property that is displayed to users.

" + }, + "type":{ + "shape":"RuleConfigurationPropertyType", + "documentation":"

The type of the configuration property.

" + } + }, + "documentation":"

Represents information about a rule configuration property.

" + }, + "RuleConfigurationPropertyList":{ + "type":"list", + "member":{"shape":"RuleConfigurationProperty"}, + "max":10 + }, + "RuleConfigurationPropertyType":{ + "type":"string", + "enum":[ + "String", + "Number", + "Boolean" + ] + }, + "RuleConfigurationValue":{ + "type":"string", + "max":10000, + "min":1 + }, + "RuleDeclaration":{ + "type":"structure", + "required":[ + "name", + "ruleTypeId" + ], + "members":{ + "name":{ + "shape":"RuleName", + "documentation":"

The name of the rule that is created for the condition, such as CheckAllResults.

" + }, + "ruleTypeId":{ + "shape":"RuleTypeId", + "documentation":"

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + }, + "configuration":{ + "shape":"RuleConfigurationMap", + "documentation":"

The action configuration fields for the rule.

" + }, + "inputArtifacts":{ + "shape":"InputArtifactList", + "documentation":"

The input artifacts fields for the rule, such as specifying an input file for the rule.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The pipeline role ARN associated with the rule.

" + }, + "region":{ + "shape":"AWSRegionName", + "documentation":"

The Region for the condition associated with the rule.

" + }, + "timeoutInMinutes":{ + "shape":"RuleTimeout", + "documentation":"

The action timeout for the rule.

" + } + }, + "documentation":"

Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage.

" + }, + "RuleDeclarationList":{ + "type":"list", + "member":{"shape":"RuleDeclaration"}, + "max":5, + "min":1 + }, + "RuleExecution":{ + "type":"structure", + "members":{ + "ruleExecutionId":{ + "shape":"RuleExecutionId", + "documentation":"

The execution ID for the run of the rule.

" + }, + "status":{ + "shape":"RuleExecutionStatus", + "documentation":"

The status of the run of the rule, such as FAILED.

" + }, + "summary":{ + "shape":"ExecutionSummary", + "documentation":"

A summary of the run of the rule.

" + }, + "lastStatusChange":{ + "shape":"Timestamp", + "documentation":"

The last status change of the rule.

" + }, + "token":{ + "shape":"RuleExecutionToken", + "documentation":"

The system-generated token used to identify a unique request.

" + }, + "lastUpdatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

The ARN of the user who last changed the rule.

" + }, + "externalExecutionId":{ + "shape":"ExecutionId", + "documentation":"

The external ID of the run of the rule.

" + }, + "externalExecutionUrl":{ + "shape":"Url", + "documentation":"

The URL of a resource external to Amazon Web Services that is used when running the rule (for example, an external repository URL).

" + }, + "errorDetails":{"shape":"ErrorDetails"} + }, + "documentation":"

Represents information about each time a rule is run as part of the pipeline execution for a pipeline configured with conditions.

" + }, + "RuleExecutionDetail":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The ID of the pipeline execution in the stage where the rule was run. Use the GetPipelineState action to retrieve the current pipelineExecutionId of the stage.

" + }, + "ruleExecutionId":{ + "shape":"RuleExecutionId", + "documentation":"

The ID of the run for the rule.

" + }, + "pipelineVersion":{ + "shape":"PipelineVersion", + "documentation":"

The version number of the pipeline with the stage where the rule was run.

" + }, + "stageName":{ + "shape":"StageName", + "documentation":"

The name of the stage where the rule was run.

" + }, + "ruleName":{ + "shape":"RuleName", + "documentation":"

The name of the rule that was run in the stage.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the rule execution.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time of the last change to the rule execution, in timestamp format.

" + }, + "updatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

The ARN of the user who changed the rule execution details.

" + }, + "status":{ + "shape":"RuleExecutionStatus", + "documentation":"

The status of the rule execution. Status categories are InProgress, Succeeded, and Failed.

" + }, + "input":{ + "shape":"RuleExecutionInput", + "documentation":"

Input details for the rule execution, such as role ARN, Region, and input artifacts.

" + }, + "output":{ + "shape":"RuleExecutionOutput", + "documentation":"

Output details for the rule execution, such as the rule execution result.

" + } + }, + "documentation":"

The details of the runs for a rule and the results produced on an artifact as it passes through stages in the pipeline.

" + }, + "RuleExecutionDetailList":{ + "type":"list", + "member":{"shape":"RuleExecutionDetail"} + }, + "RuleExecutionFilter":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The pipeline execution ID used to filter rule execution history.

" + }, + "latestInPipelineExecution":{"shape":"LatestInPipelineExecutionFilter"} + }, + "documentation":"

Filter values for the rule execution.

" + }, + "RuleExecutionId":{ + "type":"string", + "max":200, + "min":1 + }, + "RuleExecutionInput":{ + "type":"structure", + "members":{ + "ruleTypeId":{ + "shape":"RuleTypeId", + "documentation":"

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + }, + "configuration":{ + "shape":"RuleConfigurationMap", + "documentation":"

Configuration data for a rule execution, such as the resolved values for that run.

" + }, + "resolvedConfiguration":{ + "shape":"ResolvedRuleConfigurationMap", + "documentation":"

Configuration data for a rule execution with all variable references replaced with their real values for the execution.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM service role that performs the declared rule. This is assumed through the roleArn for the pipeline.

" + }, + "region":{ + "shape":"AWSRegionName", + "documentation":"

The Amazon Web Services Region for the rule, such as us-east-1.

" + }, + "inputArtifacts":{ + "shape":"ArtifactDetailList", + "documentation":"

Details of input artifacts of the rule that correspond to the rule execution.

" + } + }, + "documentation":"

Input information used for a rule execution.

" + }, + "RuleExecutionOutput":{ + "type":"structure", + "members":{ + "executionResult":{ + "shape":"RuleExecutionResult", + "documentation":"

Execution result information listed in the output details for a rule execution.

" + } + }, + "documentation":"

Output details listed for a rule execution, such as the rule execution result.

" + }, + "RuleExecutionResult":{ + "type":"structure", + "members":{ + "externalExecutionId":{ + "shape":"ExternalExecutionId", + "documentation":"

The external ID for the rule execution.

" + }, + "externalExecutionSummary":{ + "shape":"ExternalExecutionSummary", + "documentation":"

The external provider summary for the rule execution.

" + }, + "externalExecutionUrl":{ + "shape":"Url", + "documentation":"

The deepest external link to the external resource (for example, a repository URL or deployment endpoint) that is used when running the rule.

" + }, + "errorDetails":{"shape":"ErrorDetails"} + }, + "documentation":"

Execution result information, such as the external execution ID.

" + }, + "RuleExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Abandoned", + "Succeeded", + "Failed" + ] + }, + "RuleExecutionToken":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9\\-\\.]+" + }, + "RuleName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + "RuleOwner":{ + "type":"string", + "enum":["AWS"] + }, + "RuleProvider":{ + "type":"string", + "max":35, + "min":1, + "pattern":"[0-9A-Za-z_-]+" + }, + "RuleRevision":{ + "type":"structure", + "required":[ + "revisionId", + "revisionChangeId", + "created" + ], + "members":{ + "revisionId":{ + "shape":"Revision", + "documentation":"

The system-generated unique ID that identifies the revision number of the rule.

" + }, + "revisionChangeId":{ + "shape":"RevisionChangeIdentifier", + "documentation":"

The unique identifier of the change that set the state to this revision (for example, a deployment ID or timestamp).

" + }, + "created":{ + "shape":"Timestamp", + "documentation":"

The date and time when the most recent version of the rule was created, in timestamp format.

" + } + }, + "documentation":"

The change to a rule that creates a revision of the rule.

" + }, + "RuleState":{ + "type":"structure", + "members":{ + "ruleName":{ + "shape":"RuleName", + "documentation":"

The name of the rule.

" + }, + "currentRevision":{ + "shape":"RuleRevision", + "documentation":"

The ID of the current revision of the artifact successfully worked on by the job.

" + }, + "latestExecution":{ + "shape":"RuleExecution", + "documentation":"

Represents information about the latest run of a rule.

" + }, + "entityUrl":{ + "shape":"Url", + "documentation":"

A URL link for more information about the state of the action, such as a details page.

" + }, + "revisionUrl":{ + "shape":"Url", + "documentation":"

A URL link for more information about the revision, such as a commit details page.

" + } + }, + "documentation":"

Returns information about the state of a rule.

Values returned in the revisionId field indicate the rule revision information, such as the commit ID, for the current state.

" + }, + "RuleStateList":{ + "type":"list", + "member":{"shape":"RuleState"} + }, + "RuleTimeout":{ + "type":"integer", + "box":true, + "max":86400, + "min":5 + }, + "RuleType":{ + "type":"structure", + "required":[ + "id", + "inputArtifactDetails" + ], + "members":{ + "id":{ + "shape":"RuleTypeId", + "documentation":"

Represents information about a rule type.

" + }, + "settings":{ + "shape":"RuleTypeSettings", + "documentation":"

Returns information about the settings for a rule type.

" + }, + "ruleConfigurationProperties":{ + "shape":"RuleConfigurationPropertyList", + "documentation":"

The configuration properties for the rule type.

" + }, + "inputArtifactDetails":{"shape":"ArtifactDetails"} + }, + "documentation":"

The rule type, which is made up of the combined values for category, owner, provider, and version.

" + }, + "RuleTypeId":{ + "type":"structure", + "required":[ + "category", + "provider" + ], + "members":{ + "category":{ + "shape":"RuleCategory", + "documentation":"

A category defines what kind of rule can be run in the stage, and constrains the provider type for the rule. Valid categories are limited to one of the following values.

  • INVOKE

  • Approval

  • Rule

" + }, + "owner":{ + "shape":"RuleOwner", + "documentation":"

The creator of the rule being called. The valid value for the Owner field in the rule category is AWS.

" + }, + "provider":{ + "shape":"RuleProvider", + "documentation":"

The provider of the service being called by the rule. Valid providers are determined by the rule category. For example, a managed rule in the Rule category type has an owner of AWS, which would be specified as AWS.

" + }, + "version":{ + "shape":"Version", + "documentation":"

A string that describes the rule version.

" + } + }, + "documentation":"

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + }, + "RuleTypeList":{ + "type":"list", + "member":{"shape":"RuleType"} + }, + "RuleTypeSettings":{ + "type":"structure", + "members":{ + "thirdPartyConfigurationUrl":{ + "shape":"Url", + "documentation":"

The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service.

" + }, + "entityUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The URL returned to the CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for a CodeDeploy deployment group. This link is provided as part of the action display in the pipeline.

" + }, + "executionUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The URL returned to the CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for CodeDeploy. This link is shown on the pipeline view page in the CodePipeline console and provides a link to the execution entity of the external action.

" + }, + "revisionUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The URL returned to the CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

" + } + }, + "documentation":"

Returns information about the settings for a rule type.

" + }, "S3ArtifactLocation":{ "type":"structure", "required":[ @@ -4102,7 +4795,7 @@ "documentation":"

The source revision, or version of your source artifact, with the changes that you want to run in the pipeline execution.

" } }, - "documentation":"

A list that allows you to specify, or override, the source revision for a pipeline execution that's being started. A source revision is the version with all the changes to your application code, or source artifact, for the pipeline execution.

" + "documentation":"

A list that allows you to specify, or override, the source revision for a pipeline execution that's being started. A source revision is the version with all the changes to your application code, or source artifact, for the pipeline execution.

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of source revisions, either of the types can be used independently, or they can be used together to override the source with a specific ObjectKey and VersionID.

" }, "SourceRevisionOverrideList":{ "type":"list", @@ -4115,7 +4808,8 @@ "enum":[ "COMMIT_ID", "IMAGE_DIGEST", - "S3_OBJECT_VERSION_ID" + "S3_OBJECT_VERSION_ID", + "S3_OBJECT_KEY" ] }, "StageActionDeclarationList":{ @@ -4126,6 +4820,34 @@ "type":"list", "member":{"shape":"BlockerDeclaration"} }, + "StageConditionState":{ + "type":"structure", + "members":{ + "latestExecution":{ + "shape":"StageConditionsExecution", + "documentation":"

Represents information about the latest run of a condition for a stage.

" + }, + "conditionStates":{ + "shape":"ConditionStateList", + "documentation":"

The states of the conditions for a run of a condition for a stage.

" + } + }, + "documentation":"

The state of a run of a condition for a stage.

" + }, + "StageConditionsExecution":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ConditionExecutionStatus", + "documentation":"

The status of a run of a condition for a stage.

" + }, + "summary":{ + "shape":"ExecutionSummary", + "documentation":"

A summary of the run of the condition for a stage.

" + } + }, + "documentation":"

Represents information about the run of a condition for a stage.

" + }, "StageContext":{ "type":"structure", "members":{ @@ -4158,6 +4880,14 @@ "onFailure":{ "shape":"FailureConditions", "documentation":"

The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage.

" + }, + "onSuccess":{ + "shape":"SuccessConditions", + "documentation":"

The method to use when a stage has succeeded. For example, configuring this field for conditions will allow the stage to succeed when the conditions are met.

" + }, + "beforeEntry":{ + "shape":"BeforeEntryConditions", + "documentation":"

The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met.

" } }, "documentation":"

Represents information about a stage and its definition.

" @@ -4249,6 +4979,18 @@ "latestExecution":{ "shape":"StageExecution", "documentation":"

Information about the latest execution in the stage, including its ID and status.

" + }, + "beforeEntryConditionState":{ + "shape":"StageConditionState", + "documentation":"

The state of the entry conditions for a stage.

" + }, + "onSuccessConditionState":{ + "shape":"StageConditionState", + "documentation":"

The state of the success conditions for a stage.

" + }, + "onFailureConditionState":{ + "shape":"StageConditionState", + "documentation":"

The state of the failure conditions for a stage.

" } }, "documentation":"

Represents information about the state of the stage.

" @@ -4364,6 +5106,17 @@ }, "documentation":"

Filter for pipeline executions that have successfully completed the stage in the current pipeline version.

" }, + "SuccessConditions":{ + "type":"structure", + "required":["conditions"], + "members":{ + "conditions":{ + "shape":"ConditionList", + "documentation":"

The conditions that are success conditions.

" + } + }, + "documentation":"

The conditions for making checks that, if met, succeed a stage.

" + }, "Tag":{ "type":"structure", "required":[ diff --git a/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json b/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json index fccd2912a8..348fc5c4bb 100644 --- a/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json +++ b/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ 
-301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/cognito-identity/2014-06-30/service-2.json b/botocore/data/cognito-identity/2014-06-30/service-2.json index 8d2f084b98..f93ded368d 100644 --- a/botocore/data/cognito-identity/2014-06-30/service-2.json +++ b/botocore/data/cognito-identity/2014-06-30/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"cognito-identity", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Cognito Identity", "serviceId":"Cognito Identity", "signatureVersion":"v4", "targetPrefix":"AWSCognitoIdentityService", - "uid":"cognito-identity-2014-06-30" + "uid":"cognito-identity-2014-06-30", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateIdentityPool":{ @@ -114,7 +116,8 @@ {"shape":"ExternalServiceException"} ], "documentation":"

Returns credentials for the provided identity ID. Any provided logins will be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, it will be passed through to AWS Security Token Service with the appropriate role for the token.

This is a public API. You do not need any credentials to call this API.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetId":{ "name":"GetId", @@ -135,7 +138,8 @@ {"shape":"ExternalServiceException"} ], "documentation":"

Generates (or retrieves) a Cognito ID. Supplying multiple logins will create an implicit linked account.

This is a public API. You do not need any credentials to call this API.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetIdentityPoolRoles":{ "name":"GetIdentityPoolRoles", @@ -173,7 +177,8 @@ {"shape":"ExternalServiceException"} ], "documentation":"

Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.

The OpenID token is valid for 10 minutes.

This is a public API. You do not need any credentials to call this API.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetOpenIdTokenForDeveloperIdentity":{ "name":"GetOpenIdTokenForDeveloperIdentity", @@ -384,7 +389,8 @@ {"shape":"ExternalServiceException"} ], "documentation":"

Unlinks a federated identity from an existing account. Unlinked logins will be considered new identities next time they are seen. Removing the last linked login will make this identity inaccessible.

This is a public API. You do not need any credentials to call this API.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "UntagResource":{ "name":"UntagResource", diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index a98e7dfbda..7606573db6 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -10,7 +10,8 @@ "serviceId":"Cognito Identity Provider", "signatureVersion":"v4", "targetPrefix":"AWSCognitoIdentityProviderService", - "uid":"cognito-idp-2016-04-18" + "uid":"cognito-idp-2016-04-18", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddCustomAttributes":{ @@ -69,7 +70,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users confirm their accounts when they respond to their invitation email message and choose a password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required.

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminCreateUser":{ "name":"AdminCreateUser", @@ -97,7 +98,7 @@ {"shape":"UnsupportedUserStateException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new user in the specified user pool.

If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates a new user in the specified user pool.

If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDeleteUser":{ "name":"AdminDeleteUser", @@ -268,7 +269,7 @@ {"shape":"UserNotFoundException"}, {"shape":"UserNotConfirmedException"} ], - "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminLinkProviderForUser":{ "name":"AdminLinkProviderForUser", @@ -385,7 +386,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminRespondToAuthChallenge":{ "name":"AdminRespondToAuthChallenge", @@ -403,6 +404,7 @@ {"shape":"ExpiredCodeException"}, {"shape":"UnexpectedLambdaException"}, {"shape":"InvalidPasswordException"}, + {"shape":"PasswordHistoryPolicyViolationException"}, {"shape":"UserLambdaValidationException"}, {"shape":"InvalidLambdaResponseException"}, {"shape":"TooManyRequestsException"}, @@ -417,7 +419,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"SoftwareTokenMFANotFoundException"} ], - "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserMFAPreference":{ "name":"AdminSetUserMFAPreference", @@ -453,7 +455,8 @@ {"shape":"InternalErrorException"}, {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterException"}, - {"shape":"InvalidPasswordException"} + {"shape":"InvalidPasswordException"}, + {"shape":"PasswordHistoryPolicyViolationException"} ], "documentation":"

Sets the specified user's password in a user pool as an administrator. Works on any user.

The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.

Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.

AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, @@ -535,7 +538,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -572,8 +575,9 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ChangePassword":{ "name":"ChangePassword", @@ -587,6 +591,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidPasswordException"}, + {"shape":"PasswordHistoryPolicyViolationException"}, {"shape":"NotAuthorizedException"}, {"shape":"TooManyRequestsException"}, {"shape":"LimitExceededException"}, @@ -597,7 +602,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Changes the password for a specified user in a user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ConfirmDevice":{ "name":"ConfirmDevice", @@ -623,7 +629,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ConfirmForgotPassword":{ "name":"ConfirmForgotPassword", @@ -639,6 +646,7 @@ {"shape":"UserLambdaValidationException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidPasswordException"}, + {"shape":"PasswordHistoryPolicyViolationException"}, {"shape":"NotAuthorizedException"}, {"shape":"CodeMismatchException"}, {"shape":"ExpiredCodeException"}, @@ -652,7 +660,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ConfirmSignUp":{ "name":"ConfirmSignUp", @@ -680,7 +689,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "CreateGroup":{ "name":"CreateGroup", @@ -776,7 +786,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InternalErrorException"} ], - "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPoolClient":{ "name":"CreateUserPoolClient", @@ -884,7 +894,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Allows a user to delete their own user profile.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "DeleteUserAttributes":{ "name":"DeleteUserAttributes", @@ -906,7 +917,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Deletes the attributes for a user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "DeleteUserPool":{ "name":"DeleteUserPool", @@ -1098,7 +1110,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Forgets the specified device. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ForgotPassword":{ "name":"ForgotPassword", @@ -1125,8 +1138,9 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetCSVHeader":{ "name":"GetCSVHeader", @@ -1166,7 +1180,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Gets the device. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetGroup":{ "name":"GetGroup", @@ -1217,7 +1232,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets the detailed activity logging configuration for a user pool.

" + "documentation":"

Gets the logging configuration of a user pool.

" }, "GetSigningCertificate":{ "name":"GetSigningCertificate", @@ -1271,7 +1286,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Gets the user attributes and metadata for a user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetUserAttributeVerificationCode":{ "name":"GetUserAttributeVerificationCode", @@ -1300,8 +1316,9 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "GetUserPoolMfaConfig":{ "name":"GetUserPoolMfaConfig", @@ -1339,7 +1356,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.

  • Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints.

    Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin.

  • Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider.

  • Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests.

Other requests might be valid until your user's token expires.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "InitiateAuth":{ "name":"InitiateAuth", @@ -1366,8 +1384,9 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ListDevices":{ "name":"ListDevices", @@ -1390,7 +1409,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Lists the sign-in devices that Amazon Cognito has registered to the current user. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "ListGroups":{ "name":"ListGroups", @@ -1569,8 +1589,9 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "RespondToAuthChallenge":{ "name":"RespondToAuthChallenge", @@ -1589,6 +1610,7 @@ {"shape":"UnexpectedLambdaException"}, {"shape":"UserLambdaValidationException"}, {"shape":"InvalidPasswordException"}, + {"shape":"PasswordHistoryPolicyViolationException"}, {"shape":"InvalidLambdaResponseException"}, {"shape":"TooManyRequestsException"}, {"shape":"InvalidUserPoolConfigurationException"}, @@ -1603,8 +1625,9 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "RevokeToken":{ "name":"RevokeToken", @@ -1624,7 +1647,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "SetLogDeliveryConfiguration":{ "name":"SetLogDeliveryConfiguration", @@ -1641,7 +1665,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Sets up or modifies the detailed activity logging configuration of a user pool.

" + "documentation":"

Sets up or modifies the logging configuration of a user pool. User pools can export user notification logs and advanced security features user activity logs.

" }, "SetRiskConfiguration":{ "name":"SetRiskConfiguration", @@ -1699,7 +1723,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "SetUserPoolMfaConfig":{ "name":"SetUserPoolMfaConfig", @@ -1719,7 +1744,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Sets the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" + "documentation":"

Sets the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" }, "SetUserSettings":{ "name":"SetUserSettings", @@ -1740,7 +1765,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "SignUp":{ "name":"SignUp", @@ -1768,8 +1794,9 @@ {"shape":"CodeDeliveryFailureException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "StartUserImportJob":{ "name":"StartUserImportJob", @@ -1859,7 +1886,8 @@ {"shape":"InternalErrorException"} ], "documentation":"

Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "UpdateDeviceStatus":{ "name":"UpdateDeviceStatus", @@ -1882,7 +1910,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Updates the device status. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "UpdateGroup":{ "name":"UpdateGroup", @@ -1966,8 +1995,9 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", - "authtype":"none" + "documentation":"

With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "UpdateUserPool":{ "name":"UpdateUserPool", @@ -1990,7 +2020,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InvalidEmailRoleAccessPolicyException"} ], - "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateUserPoolClient":{ "name":"UpdateUserPoolClient", @@ -2054,7 +2084,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "VerifyUserAttribute":{ "name":"VerifyUserAttribute", @@ -2080,7 +2111,8 @@ {"shape":"ForbiddenException"} ], "documentation":"

Verifies the specified user attributes in the user pool.

If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] } }, "shapes":{ @@ -3084,6 +3116,23 @@ }, "documentation":"

The global sign-out response, as an administrator.

" }, + "AdvancedSecurityAdditionalFlowsType":{ + "type":"structure", + "members":{ + "CustomAuthMode":{ + "shape":"AdvancedSecurityEnabledModeType", + "documentation":"

The operating mode of advanced security features in custom authentication with Custom authentication challenge Lambda triggers.

" + } + }, + "documentation":"

Advanced security configuration options for additional authentication types in your user pool, including custom authentication.

" + }, + "AdvancedSecurityEnabledModeType":{ + "type":"string", + "enum":[ + "AUDIT", + "ENFORCED" + ] + }, "AdvancedSecurityModeType":{ "type":"string", "enum":[ @@ -3482,7 +3531,7 @@ "documentation":"

The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

To send logs to log groups with a resource policy of a size greater than 5120 characters, configure a log group with a path that starts with /aws/vendedlogs. For more information, see Enabling logging from certain Amazon Web Services services.

" } }, - "documentation":"

The CloudWatch logging destination of a user pool detailed activity logging configuration.

" + "documentation":"

Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.

" }, "CodeDeliveryDetailsListType":{ "type":"list", @@ -3984,7 +4033,7 @@ }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", - "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.

" + "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.

Defaults to LEGACY when you don't provide a value.

" }, "EnableTokenRevocation":{ "shape":"WrappedBooleanType", @@ -4876,7 +4925,10 @@ }, "EventSourceName":{ "type":"string", - "enum":["userNotification"] + "enum":[ + "userNotification", + "userAuthEvents" + ] }, "EventType":{ "type":"string", @@ -4923,6 +4975,16 @@ "Invalid" ] }, + "FirehoseConfigurationType":{ + "type":"structure", + "members":{ + "StreamArn":{ + "shape":"ArnType", + "documentation":"

The ARN of an Amazon Data Firehose stream that's the destination for advanced security features log export.

" + } + }, + "documentation":"

Configuration for the Amazon Data Firehose stream destination of user activity log export with advanced security features.

" + }, "ForbiddenException":{ "type":"structure", "members":{ @@ -5105,7 +5167,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The ID of the user pool where you want to view detailed activity logging configuration.

" + "documentation":"

The ID of the user pool that has the logging configuration that you want to view.

" } } }, @@ -5114,7 +5176,7 @@ "members":{ "LogDeliveryConfiguration":{ "shape":"LogDeliveryConfigurationType", - "documentation":"

The detailed activity logging configuration of the requested user pool.

" + "documentation":"

The logging configuration of the requested user pool.

" } } }, @@ -5994,7 +6056,7 @@ "LogConfigurationListType":{ "type":"list", "member":{"shape":"LogConfigurationType"}, - "max":1, + "max":2, "min":0 }, "LogConfigurationType":{ @@ -6006,15 +6068,23 @@ "members":{ "LogLevel":{ "shape":"LogLevel", - "documentation":"

The errorlevel selection of logs that a user pool sends for detailed activity logging.

" + "documentation":"

The errorlevel selection of logs that a user pool sends for detailed activity logging. To send userNotification activity with information about message delivery, choose ERROR with CloudWatchLogsConfiguration. To send userAuthEvents activity with user logs from advanced security features, choose INFO with one of CloudWatchLogsConfiguration, FirehoseConfiguration, or S3Configuration.

" }, "EventSource":{ "shape":"EventSourceName", - "documentation":"

The source of events that your user pool sends for detailed activity logging.

" + "documentation":"

The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to userNotification. To send info-level logs about advanced security features user activity, set to userAuthEvents.

" }, "CloudWatchLogsConfiguration":{ "shape":"CloudWatchLogsConfigurationType", - "documentation":"

The CloudWatch logging destination of a user pool.

" + "documentation":"

The CloudWatch log group destination of user pool detailed activity logs, or of user activity log export with advanced security features.

" + }, + "S3Configuration":{ + "shape":"S3ConfigurationType", + "documentation":"

The Amazon S3 bucket destination of user activity log export with advanced security features. To activate this setting, advanced security features must be active in your user pool.

" + }, + "FirehoseConfiguration":{ + "shape":"FirehoseConfigurationType", + "documentation":"

The Amazon Data Firehose stream destination of user activity log export with advanced security features. To activate this setting, advanced security features must be active in your user pool.

" } }, "documentation":"

The logging parameters of a user pool.

" @@ -6028,18 +6098,21 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The ID of the user pool where you configured detailed activity logging.

" + "documentation":"

The ID of the user pool where you configured logging.

" }, "LogConfigurations":{ "shape":"LogConfigurationListType", - "documentation":"

The detailed activity logging destination of a user pool.

" + "documentation":"

A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs.

" } }, - "documentation":"

The logging parameters of a user pool.

" + "documentation":"

The logging parameters of a user pool returned in response to GetLogDeliveryConfiguration.

" }, "LogLevel":{ "type":"string", - "enum":["ERROR"] + "enum":[ + "ERROR", + "INFO" + ] }, "LogoutURLsListType":{ "type":"list", @@ -6217,6 +6290,19 @@ "min":1, "pattern":"[\\S]+" }, + "PasswordHistoryPolicyViolationException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "documentation":"

The message returned when a user's new password matches a previous password and doesn't comply with the password-history policy.

", + "exception":true + }, + "PasswordHistorySizeType":{ + "type":"integer", + "max":24, + "min":0 + }, "PasswordPolicyMinLengthType":{ "type":"integer", "max":99, @@ -6245,6 +6331,10 @@ "shape":"BooleanType", "documentation":"

In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.

" }, + "PasswordHistorySize":{ + "shape":"PasswordHistorySizeType", + "documentation":"

The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of n previous passwords, where n is the value of PasswordHistorySize.

Password history isn't enforced and isn't displayed in DescribeUserPool responses when you set this value to 0 or don't provide it. To activate this setting, advanced security features must be active in your user pool.

" + }, "TemporaryPasswordValidityDays":{ "shape":"TemporaryPasswordValidityDaysType", "documentation":"

The number of days a temporary password is valid in the password policy. If the user doesn't sign in during this time, an administrator must reset their password. Defaults to 7. If you submit a value of 0, Amazon Cognito treats it as a null value and sets TemporaryPasswordValidityDays to its default value.

When you set TemporaryPasswordValidityDays for a user pool, you can no longer set a value for the legacy UnusedAccountValidityDays parameter in that user pool.

" @@ -6725,12 +6815,28 @@ "High" ] }, + "S3ArnType":{ + "type":"string", + "max":1024, + "min":3, + "pattern":"arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:::[\\w+=/,.@-]+(:[\\w+=/,.@-]+)?(:[\\w+=/,.@-]+)?" + }, "S3BucketType":{ "type":"string", "max":1024, "min":3, "pattern":"^[0-9A-Za-z\\.\\-_]*(?<!\\.)$" }, + "S3ConfigurationType":{ + "type":"structure", + "members":{ + "BucketArn":{ + "shape":"S3ArnType", + "documentation":"The ARN of an Amazon S3 bucket that's the destination for advanced security features log export.

" + } + }, + "documentation":"

Configuration for the Amazon S3 bucket destination of user activity log export with advanced security features.

" + }, "SESConfigurationSet":{ "type":"string", "max":64, @@ -6850,11 +6956,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The ID of the user pool where you want to configure detailed activity logging .

" + "documentation":"

The ID of the user pool where you want to configure logging.

" }, "LogConfigurations":{ "shape":"LogConfigurationListType", - "documentation":"

A collection of all of the detailed activity logging configurations for a user pool.

" + "documentation":"

A collection of the logging configurations for a user pool.

" } } }, @@ -7085,7 +7191,7 @@ }, "UserSub":{ "shape":"StringType", - "documentation":"

The UUID of the authenticated user. This isn't the same as username.

" + "documentation":"

The 128-bit ID of the authenticated user. This isn't the same as username.

" } }, "documentation":"

The response from the server for a registration request.

" @@ -7751,7 +7857,7 @@ }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", - "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.

" + "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.

Defaults to LEGACY when you don't provide a value.

" }, "EnableTokenRevocation":{ "shape":"WrappedBooleanType", @@ -8080,7 +8186,11 @@ "members":{ "AdvancedSecurityMode":{ "shape":"AdvancedSecurityModeType", - "documentation":"

The operating mode of advanced security features in your user pool.

" + "documentation":"

The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication.

" + }, + "AdvancedSecurityAdditionalFlows":{ + "shape":"AdvancedSecurityAdditionalFlowsType", + "documentation":"

Advanced security configuration options for additional authentication types in your user pool, including custom authentication.

" } }, "documentation":"

User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.

For more information, see Adding advanced security to a user pool.

" @@ -8197,7 +8307,7 @@ }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", - "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the old behavior of Amazon Cognito where user existence related errors aren't prevented.

" + "documentation":"

Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool.

Valid values include:

  • ENABLED - This prevents user existence-related errors.

  • LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.

Defaults to LEGACY when you don't provide a value.

" }, "EnableTokenRevocation":{ "shape":"WrappedBooleanType", diff --git a/botocore/data/compute-optimizer/2019-11-01/service-2.json b/botocore/data/compute-optimizer/2019-11-01/service-2.json index 3d3eaf8493..779b6921c7 100644 --- a/botocore/data/compute-optimizer/2019-11-01/service-2.json +++ b/botocore/data/compute-optimizer/2019-11-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"compute-optimizer", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Compute Optimizer", "serviceId":"Compute Optimizer", "signatureVersion":"v4", "signingName":"compute-optimizer", "targetPrefix":"ComputeOptimizerService", - "uid":"compute-optimizer-2019-11-01" + "uid":"compute-optimizer-2019-11-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "DeleteRecommendationPreferences":{ @@ -173,6 +175,26 @@ ], "documentation":"

Export optimization recommendations for your licenses.

Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one license export job in progress per Amazon Web Services Region.

" }, + "ExportRDSDatabaseRecommendations":{ + "name":"ExportRDSDatabaseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportRDSDatabaseRecommendationsRequest"}, + "output":{"shape":"ExportRDSDatabaseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Export optimization recommendations for your Amazon Relational Database Service (Amazon RDS).

Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one Amazon RDS export job in progress per Amazon Web Services Region.

" + }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", "http":{ @@ -389,6 +411,46 @@ ], "documentation":"

Returns license recommendations for Amazon EC2 instances that run on a specific license.

Compute Optimizer generates recommendations for licenses that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

" }, + "GetRDSDatabaseRecommendationProjectedMetrics":{ + "name":"GetRDSDatabaseRecommendationProjectedMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRDSDatabaseRecommendationProjectedMetricsRequest"}, + "output":{"shape":"GetRDSDatabaseRecommendationProjectedMetricsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the projected metrics of Amazon RDS recommendations.

" + }, + "GetRDSDatabaseRecommendations":{ + "name":"GetRDSDatabaseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRDSDatabaseRecommendationsRequest"}, + "output":{"shape":"GetRDSDatabaseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns Amazon RDS recommendations.

Compute Optimizer generates recommendations for Amazon RDS that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

" + }, "GetRecommendationPreferences":{ "name":"GetRecommendationPreferences", "http":{ @@ -508,6 +570,7 @@ "type":"list", "member":{"shape":"AccountId"} }, + "AllocatedStorage":{"type":"integer"}, "AutoScalingConfiguration":{ "type":"string", "enum":[ @@ -588,6 +651,10 @@ "shape":"AutoScalingGroupConfiguration", "documentation":"

An array of objects that describe the current configuration of the Auto Scaling group.

" }, + "currentInstanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

Describes the GPU accelerator settings for the current instance type of the Auto Scaling group.

" + }, "recommendationOptions":{ "shape":"AutoScalingGroupRecommendationOptions", "documentation":"

An array of objects that describe the recommendation options for the Auto Scaling group.

" @@ -607,10 +674,6 @@ "inferredWorkloadTypes":{ "shape":"InferredWorkloadTypes", "documentation":"

The applications that might be running on the instances in the Auto Scaling group as inferred by Compute Optimizer.

Compute Optimizer can infer if one of the following applications might be running on the instances:

  • AmazonEmr - Infers that Amazon EMR might be running on the instances.

  • ApacheCassandra - Infers that Apache Cassandra might be running on the instances.

  • ApacheHadoop - Infers that Apache Hadoop might be running on the instances.

  • Memcached - Infers that Memcached might be running on the instances.

  • NGINX - Infers that NGINX might be running on the instances.

  • PostgreSql - Infers that PostgreSQL might be running on the instances.

  • Redis - Infers that Redis might be running on the instances.

  • Kafka - Infers that Kafka might be running on the instance.

  • SQLServer - Infers that SQLServer might be running on the instance.

" - }, - "currentInstanceGpuInfo":{ - "shape":"GpuInfo", - "documentation":"

Describes the GPU accelerator settings for the current instance type of the Auto Scaling group.

" } }, "documentation":"

Describes an Auto Scaling group recommendation.

" @@ -622,6 +685,10 @@ "shape":"AutoScalingGroupConfiguration", "documentation":"

An array of objects that describe an Auto Scaling group configuration.

" }, + "instanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

Describes the GPU accelerator settings for the recommended instance type of the Auto Scaling group.

" + }, "projectedUtilizationMetrics":{ "shape":"ProjectedUtilizationMetrics", "documentation":"

An array of objects that describe the projected utilization metrics of the Auto Scaling group recommendation option.

The Cpu and Memory metrics are the only projected utilization metrics returned. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" @@ -638,17 +705,13 @@ "shape":"SavingsOpportunity", "documentation":"

An object that describes the savings opportunity for the Auto Scaling group recommendation option. Savings opportunity includes the estimated monthly savings amount and percentage.

" }, - "migrationEffort":{ - "shape":"MigrationEffort", - "documentation":"

The level of effort required to migrate from the current instance type to the recommended instance type.

For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

" - }, - "instanceGpuInfo":{ - "shape":"GpuInfo", - "documentation":"

Describes the GPU accelerator settings for the recommended instance type of the Auto Scaling group.

" - }, "savingsOpportunityAfterDiscounts":{ "shape":"AutoScalingGroupSavingsOpportunityAfterDiscounts", "documentation":"

An object that describes the savings opportunity for the Auto Scaling group recommendation option that includes Savings Plans and Reserved Instances discounts. Savings opportunity includes the estimated monthly savings and percentage.

" + }, + "migrationEffort":{ + "shape":"MigrationEffort", + "documentation":"

The level of effort required to migrate from the current instance type to the recommended instance type.

For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

" } }, "documentation":"

Describes a recommendation option for an Auto Scaling group.

" @@ -741,6 +804,7 @@ "CNY" ] }, + "CurrentDBInstanceClass":{"type":"string"}, "CurrentInstanceType":{"type":"string"}, "CurrentPerformanceRisk":{ "type":"string", @@ -811,6 +875,33 @@ "P99_5" ] }, + "DBInstanceClass":{"type":"string"}, + "DBStorageConfiguration":{ + "type":"structure", + "members":{ + "storageType":{ + "shape":"StorageType", + "documentation":"

The type of RDS storage.

" + }, + "allocatedStorage":{ + "shape":"AllocatedStorage", + "documentation":"

The size of the RDS storage in gigabytes (GB).

" + }, + "iops":{ + "shape":"NullableIOPS", + "documentation":"

The provisioned IOPS of the RDS storage.

" + }, + "maxAllocatedStorage":{ + "shape":"NullableMaxAllocatedStorage", + "documentation":"

The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the RDS instance.

" + }, + "storageThroughput":{ + "shape":"NullableStorageThroughput", + "documentation":"

The storage throughput of the RDS storage.

" + } + }, + "documentation":"

The configuration of the recommended RDS storage.

" + }, "DeleteRecommendationPreferencesRequest":{ "type":"structure", "required":[ @@ -820,7 +911,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"

The target resource type of the recommendation preference to delete.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

The valid values for this parameter are Ec2Instance and AutoScalingGroup.

" + "documentation":"

The target resource type of the recommendation preference to delete.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

" }, "scope":{ "shape":"Scope", @@ -1167,13 +1258,13 @@ "shape":"CurrentPerformanceRisk", "documentation":"

The risk of the current Amazon ECS service not meeting the performance needs of its workloads. The higher the risk, the more likely the current service can't meet the performance requirements of its workload.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

A list of tags assigned to your Amazon ECS service recommendations.

" - }, "effectiveRecommendationPreferences":{ "shape":"ECSEffectiveRecommendationPreferences", "documentation":"

Describes the effective recommendation preferences for Amazon ECS services.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags assigned to your Amazon ECS service recommendations.

" } }, "documentation":"

Describes an Amazon ECS service recommendation.

" @@ -1187,7 +1278,7 @@ }, "values":{ "shape":"FilterValues", - "documentation":"

The value of the filter.

The valid values for this parameter are as follows:

  • If you specify the name parameter as Finding, specify Optimized, NotOptimized, or Unavailable.

  • If you specify the name parameter as FindingReasonCode, specify CPUUnderprovisioned, CPUOverprovisioned, MemoryUnderprovisioned, or MemoryOverprovisioned.

" + "documentation":"

The value of the filter.

The valid values for this parameter are as follows:

  • If you specify the name parameter as Finding, specify Optimized, Underprovisioned, or Overprovisioned.

  • If you specify the name parameter as FindingReasonCode, specify CPUUnderprovisioned, CPUOverprovisioned, MemoryUnderprovisioned, or MemoryOverprovisioned.

" } }, "documentation":"

Describes a filter that returns a more specific list of Amazon ECS service recommendations. Use this filter with the GetECSServiceRecommendations action.

" @@ -1236,6 +1327,10 @@ "documentation":"

The CPU size of the Amazon ECS service recommendation option.

" }, "savingsOpportunity":{"shape":"SavingsOpportunity"}, + "savingsOpportunityAfterDiscounts":{ + "shape":"ECSSavingsOpportunityAfterDiscounts", + "documentation":"

Describes the savings opportunity for Amazon ECS service recommendations or for the recommendation option.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" + }, "projectedUtilizationMetrics":{ "shape":"ECSServiceProjectedUtilizationMetrics", "documentation":"

An array of objects that describe the projected utilization metrics of the Amazon ECS service recommendation option.

" @@ -1243,10 +1338,6 @@ "containerRecommendations":{ "shape":"ContainerRecommendations", "documentation":"

The CPU and memory size recommendations for the containers within the task of your Amazon ECS service.

" - }, - "savingsOpportunityAfterDiscounts":{ - "shape":"ECSSavingsOpportunityAfterDiscounts", - "documentation":"

Describes the savings opportunity for Amazon ECS service recommendations or for the recommendation option.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" } }, "documentation":"

Describes the recommendation options for an Amazon ECS service.

" @@ -1334,7 +1425,7 @@ "members":{ "cpuVendorArchitectures":{ "shape":"CpuVendorArchitectures", - "documentation":"

Describes the CPU vendor and architecture for an instance or Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:

" + "documentation":"

Describes the CPU vendor and architecture for an instance or Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:

" }, "enhancedInfrastructureMetrics":{ "shape":"EnhancedInfrastructureMetrics", @@ -1367,6 +1458,8 @@ }, "documentation":"

Describes the effective recommendation preferences for a resource.

" }, + "Engine":{"type":"string"}, + "EngineVersion":{"type":"string"}, "EnhancedInfrastructureMetrics":{ "type":"string", "enum":[ @@ -1663,6 +1756,44 @@ "s3Destination":{"shape":"S3Destination"} } }, + "ExportRDSDatabaseRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The Amazon Web Services account IDs for the export of Amazon RDS recommendations.

If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to.

This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export.

You can specify multiple account IDs per request.

" + }, + "filters":{ + "shape":"RDSDBRecommendationFilters", + "documentation":"

An array of objects to specify a filter that exports a more specific set of Amazon RDS recommendations.

" + }, + "fieldsToExport":{ + "shape":"ExportableRDSDBFields", + "documentation":"

The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide.

" + }, + "s3DestinationConfig":{"shape":"S3DestinationConfig"}, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

The format of the export file.

The CSV file is the only export file format currently supported.

" + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

If your account is the management account or the delegated administrator of an organization, this parameter indicates whether to include recommendations for resources in all member accounts of the organization.

The member accounts must also be opted in to Compute Optimizer, and trusted access for Compute Optimizer must be enabled in the organization account. For more information, see Compute Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer User Guide.

If this parameter is omitted, recommendations for member accounts of the organization aren't included in the export file.

If this parameter or the account ID parameter is omitted, recommendations for member accounts aren't included in the export.

" + }, + "recommendationPreferences":{"shape":"RecommendationPreferences"} + } + }, + "ExportRDSDatabaseRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

To view the status of an export job, use the DescribeRecommendationExportJobs action and specify the job ID.

" + }, + "s3Destination":{"shape":"S3Destination"} + } + }, "ExportableAutoScalingGroupField":{ "type":"string", "enum":[ @@ -1718,6 +1849,8 @@ "EffectiveRecommendationPreferencesCpuVendorArchitectures", "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", "EffectiveRecommendationPreferencesInferredWorkloadTypes", + "EffectiveRecommendationPreferencesPreferredResources", + "EffectiveRecommendationPreferencesLookBackPeriod", "InferredWorkloadTypes", "RecommendationOptionsMigrationEffort", "CurrentInstanceGpuInfo", @@ -1729,9 +1862,7 @@ "EffectiveRecommendationPreferencesSavingsEstimationMode", "RecommendationOptionsSavingsOpportunityAfterDiscountsPercentage", "RecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts", - "RecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts", - "EffectiveRecommendationPreferencesPreferredResources", - "EffectiveRecommendationPreferencesLookBackPeriod" + "RecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" ] }, "ExportableAutoScalingGroupFields":{ @@ -1831,8 +1962,8 @@ "InferredWorkloadTypes", "RecommendationOptionsMigrationEffort", "EffectiveRecommendationPreferencesExternalMetricsSource", - "InstanceState", "Tags", + "InstanceState", "ExternalMetricStatusCode", "ExternalMetricStatusReason", "CurrentInstanceGpuInfo", @@ -1925,6 +2056,74 @@ "type":"list", "member":{"shape":"ExportableLicenseField"} }, + "ExportableRDSDBField":{ + "type":"string", + "enum":[ + "ResourceArn", + "AccountId", + "Engine", + "EngineVersion", + "Idle", + "MultiAZDBInstance", + "CurrentDBInstanceClass", + "CurrentStorageConfigurationStorageType", + "CurrentStorageConfigurationAllocatedStorage", + "CurrentStorageConfigurationMaxAllocatedStorage", + "CurrentStorageConfigurationIOPS", + "CurrentStorageConfigurationStorageThroughput", + "CurrentInstanceOnDemandHourlyPrice", + "CurrentStorageOnDemandMonthlyPrice", + "LookbackPeriodInDays", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + 
"UtilizationMetricsEBSVolumeStorageSpaceUtilizationMaximum", + "UtilizationMetricsNetworkReceiveThroughputMaximum", + "UtilizationMetricsNetworkTransmitThroughputMaximum", + "UtilizationMetricsEBSVolumeReadIOPSMaximum", + "UtilizationMetricsEBSVolumeWriteIOPSMaximum", + "UtilizationMetricsEBSVolumeReadThroughputMaximum", + "UtilizationMetricsEBSVolumeWriteThroughputMaximum", + "UtilizationMetricsDatabaseConnectionsMaximum", + "InstanceFinding", + "InstanceFindingReasonCodes", + "StorageFinding", + "StorageFindingReasonCodes", + "InstanceRecommendationOptionsDBInstanceClass", + "InstanceRecommendationOptionsRank", + "InstanceRecommendationOptionsPerformanceRisk", + "InstanceRecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + "StorageRecommendationOptionsStorageType", + "StorageRecommendationOptionsAllocatedStorage", + "StorageRecommendationOptionsMaxAllocatedStorage", + "StorageRecommendationOptionsIOPS", + "StorageRecommendationOptionsStorageThroughput", + "StorageRecommendationOptionsRank", + "InstanceRecommendationOptionsInstanceOnDemandHourlyPrice", + "InstanceRecommendationOptionsSavingsOpportunityPercentage", + "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrency", + "InstanceRecommendationOptionsEstimatedMonthlySavingsValue", + "InstanceRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage", + "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts", + "InstanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts", + "StorageRecommendationOptionsOnDemandMonthlyPrice", + "StorageRecommendationOptionsSavingsOpportunityPercentage", + "StorageRecommendationOptionsEstimatedMonthlySavingsCurrency", + "StorageRecommendationOptionsEstimatedMonthlySavingsValue", + "StorageRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage", + "StorageRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts", + "StorageRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts", + 
"EffectiveRecommendationPreferencesCpuVendorArchitectures", + "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", + "EffectiveRecommendationPreferencesLookBackPeriod", + "EffectiveRecommendationPreferencesSavingsEstimationMode", + "LastRefreshTimestamp", + "Tags" + ] + }, + "ExportableRDSDBFields":{ + "type":"list", + "member":{"shape":"ExportableRDSDBField"} + }, "ExportableVolumeField":{ "type":"string", "enum":[ @@ -1956,8 +2155,8 @@ "RecommendationOptionsSavingsOpportunityPercentage", "RecommendationOptionsEstimatedMonthlySavingsCurrency", "RecommendationOptionsEstimatedMonthlySavingsValue", - "RootVolume", "Tags", + "RootVolume", "CurrentConfigurationRootVolume", "EffectiveRecommendationPreferencesSavingsEstimationMode", "RecommendationOptionsSavingsOpportunityAfterDiscountsPercentage", @@ -2514,6 +2713,91 @@ } } }, + "GetRDSDatabaseRecommendationProjectedMetricsRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "stat", + "period", + "startTime", + "endTime" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN that identifies the Amazon RDS.

The following is the format of the ARN:

arn:aws:rds:{region}:{accountId}:db:{resourceName}

" + }, + "stat":{ + "shape":"MetricStatistic", + "documentation":"

The statistic of the projected metrics.

" + }, + "period":{ + "shape":"Period", + "documentation":"

The granularity, in seconds, of the projected metrics data points.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp of the first projected metrics data point to return.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp of the last projected metrics data point to return.

" + }, + "recommendationPreferences":{"shape":"RecommendationPreferences"} + } + }, + "GetRDSDatabaseRecommendationProjectedMetricsResponse":{ + "type":"structure", + "members":{ + "recommendedOptionProjectedMetrics":{ + "shape":"RDSDatabaseRecommendedOptionProjectedMetrics", + "documentation":"

An array of objects that describe the projected metrics.

" + } + } + }, + "GetRDSDatabaseRecommendationsRequest":{ + "type":"structure", + "members":{ + "resourceArns":{ + "shape":"ResourceArns", + "documentation":"

The ARN that identifies the Amazon RDS.

The following is the format of the ARN:

arn:aws:rds:{region}:{accountId}:db:{resourceName}

The following is the format of a DB Cluster ARN:

arn:aws:rds:{region}:{accountId}:cluster:{resourceName}

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to advance to the next page of Amazon RDS recommendations.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of Amazon RDS recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned nextToken value.

" + }, + "filters":{ + "shape":"RDSDBRecommendationFilters", + "documentation":"

An array of objects to specify a filter that returns a more specific list of Amazon RDS recommendations.

" + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

Return the Amazon RDS recommendations to the specified Amazon Web Services account IDs.

If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon RDS recommendations to specific member accounts.

You can only specify one account ID per request.

" + }, + "recommendationPreferences":{"shape":"RecommendationPreferences"} + } + }, + "GetRDSDatabaseRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to advance to the next page of Amazon RDS recommendations.

" + }, + "rdsDBRecommendations":{ + "shape":"RDSDBRecommendations", + "documentation":"

An array of objects that describe the Amazon RDS recommendations.

" + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

An array of objects that describe errors of the request.

" + } + } + }, "GetRecommendationError":{ "type":"structure", "members":{ @@ -2542,7 +2826,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"

The target resource type of the recommendation preference for which to return preferences.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

The valid values for this parameter are Ec2Instance and AutoScalingGroup.

" + "documentation":"

The target resource type of the recommendation preference for which to return preferences.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

" }, "scope":{ "shape":"Scope", @@ -2633,6 +2917,13 @@ }, "High":{"type":"long"}, "Identifier":{"type":"string"}, + "Idle":{ + "type":"string", + "enum":[ + "True", + "False" + ] + }, "IncludeMemberAccounts":{"type":"boolean"}, "InferredWorkloadSaving":{ "type":"structure", @@ -2725,7 +3016,7 @@ }, "finding":{ "shape":"Finding", - "documentation":"

The finding classification of the instance.

Findings for instances include:

  • Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.

  • Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.

  • Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. For optimized resources, Compute Optimizer might recommend a new generation instance type.

" + "documentation":"

The finding classification of the instance.

Findings for instances include:

  • Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.

  • Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.

  • Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. For optimized resources, Compute Optimizer might recommend a new generation instance type.

The valid values in your API responses appear as OVER_PROVISIONED, UNDER_PROVISIONED, or OPTIMIZED.

" }, "findingReasonCodes":{ "shape":"InstanceRecommendationFindingReasonCodes", @@ -2822,6 +3113,10 @@ "shape":"InstanceType", "documentation":"

The instance type of the instance recommendation.

" }, + "instanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

Describes the GPU accelerator settings for the recommended instance type.

" + }, "projectedUtilizationMetrics":{ "shape":"ProjectedUtilizationMetrics", "documentation":"

An array of objects that describe the projected utilization metrics of the instance recommendation option.

The Cpu and Memory metrics are the only projected utilization metrics returned. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" @@ -2842,17 +3137,13 @@ "shape":"SavingsOpportunity", "documentation":"

An object that describes the savings opportunity for the instance recommendation option. Savings opportunity includes the estimated monthly savings amount and percentage.

" }, - "migrationEffort":{ - "shape":"MigrationEffort", - "documentation":"

The level of effort required to migrate from the current instance type to the recommended instance type.

For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

" - }, - "instanceGpuInfo":{ - "shape":"GpuInfo", - "documentation":"

Describes the GPU accelerator settings for the recommended instance type.

" - }, "savingsOpportunityAfterDiscounts":{ "shape":"InstanceSavingsOpportunityAfterDiscounts", "documentation":"

An object that describes the savings opportunity for the instance recommendation option that includes Savings Plans and Reserved Instances discounts. Savings opportunity includes the estimated monthly savings and percentage.

" + }, + "migrationEffort":{ + "shape":"MigrationEffort", + "documentation":"

The level of effort required to migrate from the current instance type to the recommended instance type.

For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

" } }, "documentation":"

Describes a recommendation option for an Amazon EC2 instance.

" @@ -3115,13 +3406,13 @@ "shape":"CurrentPerformanceRisk", "documentation":"

The risk of the current Lambda function not meeting the performance needs of its workloads. The higher the risk, the more likely the current Lambda function requires more memory.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

A list of tags assigned to your Lambda function recommendations.

" - }, "effectiveRecommendationPreferences":{ "shape":"LambdaEffectiveRecommendationPreferences", "documentation":"

Describes the effective recommendation preferences for Lambda functions.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags assigned to your Lambda function recommendations.

" } }, "documentation":"

Describes an Lambda function recommendation.

" @@ -3534,8 +3825,11 @@ }, "NextToken":{"type":"string"}, "NullableCpu":{"type":"integer"}, + "NullableIOPS":{"type":"integer"}, + "NullableMaxAllocatedStorage":{"type":"integer"}, "NullableMemory":{"type":"integer"}, "NullableMemoryReservation":{"type":"integer"}, + "NullableStorageThroughput":{"type":"integer"}, "NumberOfCores":{"type":"integer"}, "NumberOfInvocations":{"type":"long"}, "NumberOfMemberAccountsOptedIn":{"type":"integer"}, @@ -3633,7 +3927,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"

The target resource type of the recommendation preference to create.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

The valid values for this parameter are Ec2Instance and AutoScalingGroup.

" + "documentation":"

The target resource type of the recommendation preference to create.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

" }, "scope":{ "shape":"Scope", @@ -3653,7 +3947,7 @@ }, "lookBackPeriod":{ "shape":"LookBackPeriodPreference", - "documentation":"

The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14.

You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.

" + "documentation":"

The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14.

You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.

  • Amazon EC2 instance lookback preferences can be set at the organization, account, and resource levels.

  • Auto Scaling group lookback preferences can only be set at the resource level.

" }, "utilizationPreferences":{ "shape":"UtilizationPreferences", @@ -3674,6 +3968,407 @@ "members":{ } }, + "RDSDBInstanceRecommendationOption":{ + "type":"structure", + "members":{ + "dbInstanceClass":{ + "shape":"DBInstanceClass", + "documentation":"

Describes the DB instance class recommendation option for your Amazon RDS instance.

" + }, + "projectedUtilizationMetrics":{ + "shape":"RDSDBProjectedUtilizationMetrics", + "documentation":"

An array of objects that describe the projected utilization metrics of the RDS instance recommendation option.

" + }, + "performanceRisk":{ + "shape":"PerformanceRisk", + "documentation":"

The performance risk of the RDS instance recommendation option.

" + }, + "rank":{ + "shape":"Rank", + "documentation":"

The rank identifier of the RDS instance recommendation option.

" + }, + "savingsOpportunity":{"shape":"SavingsOpportunity"}, + "savingsOpportunityAfterDiscounts":{ + "shape":"RDSInstanceSavingsOpportunityAfterDiscounts", + "documentation":"

Describes the savings opportunity for Amazon RDS recommendations or for the recommendation option.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" + } + }, + "documentation":"

Describes the recommendation options for an Amazon RDS instance.

" + }, + "RDSDBInstanceRecommendationOptions":{ + "type":"list", + "member":{"shape":"RDSDBInstanceRecommendationOption"} + }, + "RDSDBMetricName":{ + "type":"string", + "enum":[ + "CPU", + "Memory", + "EBSVolumeStorageSpaceUtilization", + "NetworkReceiveThroughput", + "NetworkTransmitThroughput", + "EBSVolumeReadIOPS", + "EBSVolumeWriteIOPS", + "EBSVolumeReadThroughput", + "EBSVolumeWriteThroughput", + "DatabaseConnections" + ] + }, + "RDSDBMetricStatistic":{ + "type":"string", + "enum":[ + "Maximum", + "Minimum", + "Average" + ] + }, + "RDSDBProjectedUtilizationMetrics":{ + "type":"list", + "member":{"shape":"RDSDBUtilizationMetric"} + }, + "RDSDBRecommendation":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the current Amazon RDS.

The following is the format of the ARN:

arn:aws:rds:{region}:{accountId}:db:{resourceName}

" + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

The Amazon Web Services account ID of the Amazon RDS.

" + }, + "engine":{ + "shape":"Engine", + "documentation":"

The engine of the RDS instance.

" + }, + "engineVersion":{ + "shape":"EngineVersion", + "documentation":"

The database engine version.

" + }, + "currentDBInstanceClass":{ + "shape":"CurrentDBInstanceClass", + "documentation":"

The DB instance class of the current RDS instance.

" + }, + "currentStorageConfiguration":{ + "shape":"DBStorageConfiguration", + "documentation":"

The configuration of the current RDS storage.

" + }, + "idle":{ + "shape":"Idle", + "documentation":"

This indicates if the RDS instance is idle or not.

" + }, + "instanceFinding":{ + "shape":"RDSInstanceFinding", + "documentation":"

The finding classification of an Amazon RDS instance.

Findings for Amazon RDS instance include:

  • Underprovisioned — When Compute Optimizer detects that there’s not enough resource specifications, an Amazon RDS is considered under-provisioned.

  • Overprovisioned — When Compute Optimizer detects that there’s excessive resource specifications, an Amazon RDS is considered over-provisioned.

  • Optimized — When the specifications of your Amazon RDS instance meet the performance requirements of your workload, the service is considered optimized.

" + }, + "storageFinding":{ + "shape":"RDSStorageFinding", + "documentation":"

The finding classification of Amazon RDS storage.

Findings for Amazon RDS instance include:

  • Underprovisioned — When Compute Optimizer detects that there’s not enough storage, an Amazon RDS is considered under-provisioned.

  • Overprovisioned — When Compute Optimizer detects that there’s excessive storage, an Amazon RDS is considered over-provisioned.

  • Optimized — When the storage of your Amazon RDS meets the performance requirements of your workload, the service is considered optimized.

" + }, + "instanceFindingReasonCodes":{ + "shape":"RDSInstanceFindingReasonCodes", + "documentation":"

The reason for the finding classification of an Amazon RDS instance.

" + }, + "storageFindingReasonCodes":{ + "shape":"RDSStorageFindingReasonCodes", + "documentation":"

The reason for the finding classification of Amazon RDS storage.

" + }, + "instanceRecommendationOptions":{ + "shape":"RDSDBInstanceRecommendationOptions", + "documentation":"

An array of objects that describe the recommendation options for the Amazon RDS instance.

" + }, + "storageRecommendationOptions":{ + "shape":"RDSDBStorageRecommendationOptions", + "documentation":"

An array of objects that describe the recommendation options for Amazon RDS storage.

" + }, + "utilizationMetrics":{ + "shape":"RDSDBUtilizationMetrics", + "documentation":"

An array of objects that describe the utilization metrics of the Amazon RDS.

" + }, + "effectiveRecommendationPreferences":{ + "shape":"RDSEffectiveRecommendationPreferences", + "documentation":"

Describes the effective recommendation preferences for Amazon RDS.

" + }, + "lookbackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

The number of days the Amazon RDS utilization metrics were analyzed.

" + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

The timestamp of when the Amazon RDS recommendation was last generated.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags assigned to your Amazon RDS recommendations.

" + } + }, + "documentation":"

Describes an Amazon RDS recommendation.

" + }, + "RDSDBRecommendationFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RDSDBRecommendationFilterName", + "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific finding classification.

You can filter your Amazon RDS recommendations by tag:key and tag-key tags.

A tag:key is a key and value combination of a tag assigned to your Amazon RDS recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all Amazon RDS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

A tag-key is the key of a tag assigned to your Amazon RDS recommendations. Use this filter to find all of your Amazon RDS recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon RDS service recommendations with a tag key value of Owner or without any tag keys assigned.

" + }, + "values":{ + "shape":"FilterValues", + "documentation":"

The value of the filter.

" + } + }, + "documentation":"

Describes a filter that returns a more specific list of Amazon RDS recommendations. Use this filter with the GetRDSDatabaseRecommendations action.

" + }, + "RDSDBRecommendationFilterName":{ + "type":"string", + "enum":[ + "InstanceFinding", + "InstanceFindingReasonCode", + "StorageFinding", + "StorageFindingReasonCode", + "Idle" + ] + }, + "RDSDBRecommendationFilters":{ + "type":"list", + "member":{"shape":"RDSDBRecommendationFilter"} + }, + "RDSDBRecommendations":{ + "type":"list", + "member":{"shape":"RDSDBRecommendation"} + }, + "RDSDBStorageRecommendationOption":{ + "type":"structure", + "members":{ + "storageConfiguration":{ + "shape":"DBStorageConfiguration", + "documentation":"

The recommended storage configuration.

" + }, + "rank":{ + "shape":"Rank", + "documentation":"

The rank identifier of the RDS storage recommendation option.

" + }, + "savingsOpportunity":{"shape":"SavingsOpportunity"}, + "savingsOpportunityAfterDiscounts":{ + "shape":"RDSStorageSavingsOpportunityAfterDiscounts", + "documentation":"

Describes the savings opportunity for Amazon RDS storage recommendations or for the recommendation option.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" + } + }, + "documentation":"

Describes the recommendation options for Amazon RDS storage.

" + }, + "RDSDBStorageRecommendationOptions":{ + "type":"list", + "member":{"shape":"RDSDBStorageRecommendationOption"} + }, + "RDSDBUtilizationMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RDSDBMetricName", + "documentation":"

The name of the utilization metric.

" + }, + "statistic":{ + "shape":"RDSDBMetricStatistic", + "documentation":"

The statistic of the utilization metric.

The Compute Optimizer API, Command Line Interface (CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" + }, + "value":{ + "shape":"MetricValue", + "documentation":"

The value of the utilization metric.

" + } + }, + "documentation":"

Describes the utilization metric of an Amazon RDS.

To determine the performance difference between your current Amazon RDS and the recommended option, compare the utilization metric data of your service against its projected utilization metric data.

" + }, + "RDSDBUtilizationMetrics":{ + "type":"list", + "member":{"shape":"RDSDBUtilizationMetric"} + }, + "RDSDatabaseProjectedMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RDSDBMetricName", + "documentation":"

The name of the projected metric.

" + }, + "timestamps":{ + "shape":"Timestamps", + "documentation":"

The timestamps of the projected metric.

" + }, + "values":{ + "shape":"MetricValues", + "documentation":"

The values for the projected metric.

" + } + }, + "documentation":"

Describes the projected metrics of an Amazon RDS recommendation option.

To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

" + }, + "RDSDatabaseProjectedMetrics":{ + "type":"list", + "member":{"shape":"RDSDatabaseProjectedMetric"} + }, + "RDSDatabaseRecommendedOptionProjectedMetric":{ + "type":"structure", + "members":{ + "recommendedDBInstanceClass":{ + "shape":"RecommendedDBInstanceClass", + "documentation":"

The recommended DB instance class for the Amazon RDS.

" + }, + "rank":{ + "shape":"Rank", + "documentation":"

The rank identifier of the RDS instance recommendation option.

" + }, + "projectedMetrics":{ + "shape":"RDSDatabaseProjectedMetrics", + "documentation":"

An array of objects that describe the projected metric.

" + } + }, + "documentation":"

Describes the projected metrics of an Amazon RDS recommendation option.

To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

" + }, + "RDSDatabaseRecommendedOptionProjectedMetrics":{ + "type":"list", + "member":{"shape":"RDSDatabaseRecommendedOptionProjectedMetric"} + }, + "RDSEffectiveRecommendationPreferences":{ + "type":"structure", + "members":{ + "cpuVendorArchitectures":{ + "shape":"CpuVendorArchitectures", + "documentation":"

Describes the CPU vendor and architecture for Amazon RDS recommendations.

" + }, + "enhancedInfrastructureMetrics":{ + "shape":"EnhancedInfrastructureMetrics", + "documentation":"

Describes the activation status of the enhanced infrastructure metrics preference.

A status of Active confirms that the preference is applied in the latest recommendation refresh, and a status of Inactive confirms that it's not yet applied to recommendations.

For more information, see Enhanced infrastructure metrics in the Compute Optimizer User Guide.

" + }, + "lookBackPeriod":{ + "shape":"LookBackPeriodPreference", + "documentation":"

The number of days the utilization metrics of the Amazon RDS are analyzed.

" + }, + "savingsEstimationMode":{ + "shape":"RDSSavingsEstimationMode", + "documentation":"

Describes the savings estimation mode preference applied for calculating savings opportunity for Amazon RDS.

" + } + }, + "documentation":"

Describes the effective recommendation preferences for Amazon RDS.

" + }, + "RDSInstanceEstimatedMonthlySavings":{ + "type":"structure", + "members":{ + "currency":{ + "shape":"Currency", + "documentation":"

The currency of the estimated monthly savings.

" + }, + "value":{ + "shape":"Value", + "documentation":"

The value of the estimated monthly savings for Amazon RDS instances.

" + } + }, + "documentation":"

Describes the estimated monthly savings possible for Amazon RDS instances by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

" + }, + "RDSInstanceFinding":{ + "type":"string", + "enum":[ + "Optimized", + "Underprovisioned", + "Overprovisioned" + ] + }, + "RDSInstanceFindingReasonCode":{ + "type":"string", + "enum":[ + "CPUOverprovisioned", + "NetworkBandwidthOverprovisioned", + "EBSIOPSOverprovisioned", + "EBSThroughputOverprovisioned", + "CPUUnderprovisioned", + "NetworkBandwidthUnderprovisioned", + "EBSThroughputUnderprovisioned", + "NewGenerationDBInstanceClassAvailable", + "NewEngineVersionAvailable" + ] + }, + "RDSInstanceFindingReasonCodes":{ + "type":"list", + "member":{"shape":"RDSInstanceFindingReasonCode"} + }, + "RDSInstanceSavingsOpportunityAfterDiscounts":{ + "type":"structure", + "members":{ + "savingsOpportunityPercentage":{ + "shape":"SavingsOpportunityPercentage", + "documentation":"

The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

" + }, + "estimatedMonthlySavings":{ + "shape":"RDSInstanceEstimatedMonthlySavings", + "documentation":"

The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

" + } + }, + "documentation":"

Describes the savings opportunity for Amazon RDS instance recommendations after applying Savings Plans discounts.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" + }, + "RDSSavingsEstimationMode":{ + "type":"structure", + "members":{ + "source":{ + "shape":"RDSSavingsEstimationModeSource", + "documentation":"

Describes the source for calculating the savings opportunity for Amazon RDS.

" + } + }, + "documentation":"

Describes the savings estimation mode used for calculating savings opportunity for Amazon RDS.

" + }, + "RDSSavingsEstimationModeSource":{ + "type":"string", + "enum":[ + "PublicPricing", + "CostExplorerRightsizing", + "CostOptimizationHub" + ] + }, + "RDSStorageEstimatedMonthlySavings":{ + "type":"structure", + "members":{ + "currency":{ + "shape":"Currency", + "documentation":"

The currency of the estimated monthly savings.

" + }, + "value":{ + "shape":"Value", + "documentation":"

The value of the estimated monthly savings for Amazon RDS storage.

" + } + }, + "documentation":"

Describes the estimated monthly savings possible for Amazon RDS storage by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

" + }, + "RDSStorageFinding":{ + "type":"string", + "enum":[ + "Optimized", + "Underprovisioned", + "Overprovisioned" + ] + }, + "RDSStorageFindingReasonCode":{ + "type":"string", + "enum":[ + "EBSVolumeAllocatedStorageUnderprovisioned", + "EBSVolumeThroughputUnderprovisioned", + "EBSVolumeIOPSOverprovisioned", + "EBSVolumeThroughputOverprovisioned", + "NewGenerationStorageTypeAvailable" + ] + }, + "RDSStorageFindingReasonCodes":{ + "type":"list", + "member":{"shape":"RDSStorageFindingReasonCode"} + }, + "RDSStorageSavingsOpportunityAfterDiscounts":{ + "type":"structure", + "members":{ + "savingsOpportunityPercentage":{ + "shape":"SavingsOpportunityPercentage", + "documentation":"

The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

" + }, + "estimatedMonthlySavings":{ + "shape":"RDSStorageEstimatedMonthlySavings", + "documentation":"

The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

" + } + }, + "documentation":"

Describes the savings opportunity for Amazon RDS storage recommendations after applying Savings Plans discounts.

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

" + }, "Rank":{"type":"integer"}, "ReasonCodeSummaries":{ "type":"list", @@ -3755,10 +4450,10 @@ "members":{ "cpuVendorArchitectures":{ "shape":"CpuVendorArchitectures", - "documentation":"

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:

" + "documentation":"

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:

" } }, - "documentation":"

Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, and GetEC2RecommendationProjectedMetrics request.

" + "documentation":"

Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, GetEC2RecommendationProjectedMetrics, GetRDSDatabaseRecommendations, and GetRDSDatabaseRecommendationProjectedMetrics request.

" }, "RecommendationPreferencesDetail":{ "type":"structure", @@ -3829,7 +4524,9 @@ "EbsVolume", "LambdaFunction", "EcsService", - "License" + "License", + "RdsDBInstance", + "RdsDBInstanceStorage" ] }, "RecommendationSources":{ @@ -3870,6 +4567,7 @@ }, "documentation":"

A summary of a recommendation.

" }, + "RecommendedDBInstanceClass":{"type":"string"}, "RecommendedInstanceType":{"type":"string"}, "RecommendedOptionProjectedMetric":{ "type":"structure", @@ -3916,7 +4614,8 @@ "LambdaFunction", "NotApplicable", "EcsService", - "License" + "License", + "RdsDBInstance" ] }, "RootVolume":{"type":"boolean"}, @@ -4047,6 +4746,7 @@ ] }, "StatusReason":{"type":"string"}, + "StorageType":{"type":"string"}, "Summaries":{ "type":"list", "member":{"shape":"Summary"} @@ -4168,7 +4868,7 @@ "documentation":"

The parameters to set when customizing the resource utilization thresholds.

" } }, - "documentation":"

The preference to control the resource’s CPU utilization thresholds - threshold and headroom.

This preference is only available for the Amazon EC2 instance resource type.

" + "documentation":"

The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom.

This preference is only available for the Amazon EC2 instance resource type.

" }, "UtilizationPreferences":{ "type":"list", @@ -4258,13 +4958,13 @@ "shape":"CurrentPerformanceRisk", "documentation":"

The risk of the current EBS volume not meeting the performance needs of its workloads. The higher the risk, the more likely the current EBS volume doesn't have sufficient capacity.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

A list of tags assigned to your Amazon EBS volume recommendations.

" - }, "effectiveRecommendationPreferences":{ "shape":"EBSEffectiveRecommendationPreferences", "documentation":"

Describes the effective recommendation preferences for Amazon EBS volume.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags assigned to your Amazon EBS volume recommendations.

" } }, "documentation":"

Describes an Amazon Elastic Block Store (Amazon EBS) volume recommendation.

" diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index 4b17ca2c8e..0f20eea266 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"config", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Config Service", "serviceFullName":"AWS Config", "serviceId":"Config Service", "signatureVersion":"v4", "targetPrefix":"StarlingDoveService", - "uid":"config-2014-11-12" + "uid":"config-2014-11-12", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchGetAggregateResourceConfig":{ @@ -5660,7 +5662,7 @@ "type":"string", "max":64, "min":1, - "pattern":".*\\S.*" + "pattern":"[A-Za-z0-9-_]+" }, "OrganizationConfigRuleNames":{ "type":"list", diff --git a/botocore/data/connect-contact-lens/2020-08-21/endpoint-rule-set-1.json b/botocore/data/connect-contact-lens/2020-08-21/endpoint-rule-set-1.json index 6abbb71bc8..2768477374 100644 --- a/botocore/data/connect-contact-lens/2020-08-21/endpoint-rule-set-1.json +++ b/botocore/data/connect-contact-lens/2020-08-21/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": 
"booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/connect-contact-lens/2020-08-21/service-2.json b/botocore/data/connect-contact-lens/2020-08-21/service-2.json index 08a0c4e78b..335298885f 100644 --- a/botocore/data/connect-contact-lens/2020-08-21/service-2.json +++ b/botocore/data/connect-contact-lens/2020-08-21/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"contact-lens", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Amazon Connect Contact Lens", "serviceFullName":"Amazon Connect Contact Lens", "serviceId":"Connect Contact Lens", "signatureVersion":"v4", "signingName":"connect", - "uid":"connect-contact-lens-2020-08-21" + "uid":"connect-contact-lens-2020-08-21", + "auth":["aws.auth#sigv4"] }, "operations":{ "ListRealtimeContactAnalysisSegments":{ @@ -164,7 +166,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximimum number of results to return per page.

" + "documentation":"

The maximum number of results to return per page.

" }, "NextToken":{ "shape":"NextToken", @@ -251,6 +253,47 @@ "max":20, "min":0 }, + "PostContactSummary":{ + "type":"structure", + "required":["Status"], + "members":{ + "Content":{ + "shape":"PostContactSummaryContent", + "documentation":"

The content of the summary.

" + }, + "Status":{ + "shape":"PostContactSummaryStatus", + "documentation":"

Whether the summary was successfully COMPLETED or FAILED to be generated.

" + }, + "FailureCode":{ + "shape":"PostContactSummaryFailureCode", + "documentation":"

If the summary failed to be generated, one of the following failure codes occurs:

  • QUOTA_EXCEEDED: The number of concurrent analytics jobs reached your service quota.

  • INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have at least one turn from both the participants in order to generate the summary.

  • FAILED_SAFETY_GUIDELINES: The generated summary cannot be provided because it failed to meet system safety guidelines.

  • INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for example, you're using a language that isn't supported by generative AI-powered post-contact summaries.

  • INTERNAL_ERROR: Internal system error.

" + } + }, + "documentation":"

Information about the post-contact summary.

" + }, + "PostContactSummaryContent":{ + "type":"string", + "max":1762, + "min":1 + }, + "PostContactSummaryFailureCode":{ + "type":"string", + "enum":[ + "QUOTA_EXCEEDED", + "INSUFFICIENT_CONVERSATION_CONTENT", + "FAILED_SAFETY_GUIDELINES", + "INVALID_ANALYSIS_CONFIGURATION", + "INTERNAL_ERROR" + ] + }, + "PostContactSummaryStatus":{ + "type":"string", + "enum":[ + "FAILED", + "COMPLETED" + ] + }, "RealtimeContactAnalysisSegment":{ "type":"structure", "members":{ @@ -261,6 +304,10 @@ "Categories":{ "shape":"Categories", "documentation":"

The matched category rules.

" + }, + "PostContactSummary":{ + "shape":"PostContactSummary", + "documentation":"

Information about the post-contact summary.

" } }, "documentation":"

An analyzed segment for a real-time analysis session.

" @@ -316,7 +363,7 @@ }, "ParticipantId":{ "shape":"ParticipantId", - "documentation":"

The identifier of the participant.

" + "documentation":"

The identifier of the participant. Valid values are CUSTOMER or AGENT.

" }, "ParticipantRole":{ "shape":"ParticipantRole", @@ -336,7 +383,7 @@ }, "Sentiment":{ "shape":"SentimentValue", - "documentation":"

The sentiment of the detected for this piece of transcript.

" + "documentation":"

The sentiment detected for this piece of transcript.

" }, "IssuesDetected":{ "shape":"IssuesDetected", @@ -357,5 +404,5 @@ "pattern":".*\\S.*" } }, - "documentation":"

Contact Lens for Amazon Connect enables you to analyze conversations between customer and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts.

Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using Contact Lens in the Amazon Connect Administrator Guide.

" + "documentation":"

Amazon Connect Contact Lens enables you to analyze conversations between customer and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts.

Amazon Connect Contact Lens provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using speech analytics in the Amazon Connect Administrator Guide.

" } diff --git a/botocore/data/connect/2017-08-08/paginators-1.json b/botocore/data/connect/2017-08-08/paginators-1.json index e225ddc182..4358a0eeec 100644 --- a/botocore/data/connect/2017-08-08/paginators-1.json +++ b/botocore/data/connect/2017-08-08/paginators-1.json @@ -386,6 +386,30 @@ ], "output_token": "NextToken", "result_key": "ContactFlows" + }, + "ListAuthenticationProfiles": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AuthenticationProfileSummaryList" + }, + "SearchAgentStatuses": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "AgentStatuses" + }, + "SearchUserHierarchyGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "UserHierarchyGroups" } } } diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 4d64cd21be..41a6491d23 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -11,7 +11,8 @@ "serviceId":"Connect", "signatureVersion":"v4", "signingName":"connect", - "uid":"connect-2017-08-08" + "uid":"connect-2017-08-08", + "auth":["aws.auth#sigv4"] }, "operations":{ "ActivateEvaluationForm":{ @@ -380,7 +381,7 @@ {"shape":"IdempotencyException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Claims an available phone number to your Amazon Connect instance or traffic distribution group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created.

For more information about how to use this operation, see Claim a phone number in your country and Claim phone numbers to traffic distribution groups in the Amazon Connect Administrator Guide.

You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation.

If you plan to claim and release numbers frequently during a 30 day period, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until 30 days past the oldest number released has expired.

By default you can claim and release up to 200% of your maximum number of active phone numbers during any 30 day period. If you claim and release phone numbers using the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 30 days past the oldest number released has expired.

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket.

" + "documentation":"

Claims an available phone number to your Amazon Connect instance or traffic distribution group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created.

For more information about how to use this operation, see Claim a phone number in your country and Claim phone numbers to traffic distribution groups in the Amazon Connect Administrator Guide.

You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation.

If you plan to claim and release numbers frequently, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired.

By default you can claim and release up to 200% of your maximum number of active phone numbers. If you claim and release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 180 days past the oldest number released has expired.

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket.

" }, "CompleteAttachedFileUpload":{ "name":"CompleteAttachedFileUpload", @@ -513,7 +514,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

" + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.

For more information, see Create an Amazon Connect instance in the Amazon Connect Administrator Guide.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

" }, "CreateIntegrationAssociation":{ "name":"CreateIntegrationAssociation", @@ -583,7 +584,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new predefined attribute for the specified Amazon Connect instance.

" + "documentation":"

Creates a new predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

" }, "CreatePrompt":{ "name":"CreatePrompt", @@ -601,7 +602,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator's Guide.

" + "documentation":"

Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator Guide.

" }, "CreateQueue":{ "name":"CreateQueue", @@ -696,7 +697,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a security profile.

" + "documentation":"

Creates a security profile.

For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" }, "CreateTaskTemplate":{ "name":"CreateTaskTemplate", @@ -985,7 +986,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes the Amazon Connect instance.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

" + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes the Amazon Connect instance. For more information, see Delete your Amazon Connect instance in the Amazon Connect Administrator Guide.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

" }, "DeleteIntegrationAssociation":{ "name":"DeleteIntegrationAssociation", @@ -1051,7 +1052,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes a queue.

" + "documentation":"

Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.

" }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", @@ -1275,6 +1276,23 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Describes an agent status.

" }, + "DescribeAuthenticationProfile":{ + "name":"DescribeAuthenticationProfile", + "http":{ + "method":"GET", + "requestUri":"/authentication-profiles/{InstanceId}/{AuthenticationProfileId}" + }, + "input":{"shape":"DescribeAuthenticationProfileRequest"}, + "output":{"shape":"DescribeAuthenticationProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change. To request access to this API, contact Amazon Web Services Support.

Describes the target authentication profile.

" + }, "DescribeContact":{ "name":"DescribeContact", "http":{ @@ -1458,7 +1476,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes a predefined attribute for the specified Amazon Connect instance.

" + "documentation":"

Describes a predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

" }, "DescribePrompt":{ "name":"DescribePrompt", @@ -1560,7 +1578,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Gets basic information about the security profle.

" + "documentation":"

Gets basic information about the security profile.

For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" }, "DescribeTrafficDistributionGroup":{ "name":"DescribeTrafficDistributionGroup", @@ -2029,7 +2047,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets metric data from the specified Amazon Connect instance.

GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals.

For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

" + "documentation":"

Gets metric data from the specified Amazon Connect instance.

GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals.

For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide.

" }, "GetPromptFile":{ "name":"GetPromptFile", @@ -2098,7 +2116,7 @@ {"shape":"IdempotencyException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance was created.

" + "documentation":"

Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance was created.

Call the DescribePhoneNumber API to verify the status of a previous ImportPhoneNumber operation.

If you plan to claim or import numbers and then release numbers frequently, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired.

By default you can claim or import and then release up to 200% of your maximum number of active phone numbers. If you claim or import and then release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming or importing any more numbers until 180 days past the oldest number released has expired.

For example, if you already have 99 claimed or imported numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services Support ticket.

" }, "ListAgentStatuses":{ "name":"ListAgentStatuses", @@ -2151,6 +2169,23 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Returns a paginated list of all approved origins associated with the instance.

" }, + "ListAuthenticationProfiles":{ + "name":"ListAuthenticationProfiles", + "http":{ + "method":"GET", + "requestUri":"/authentication-profiles-summary/{InstanceId}" + }, + "input":{"shape":"ListAuthenticationProfilesRequest"}, + "output":{"shape":"ListAuthenticationProfilesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change. To request access to this API, contact Amazon Web Services Support.

Provides summary information about the authentication profiles in a specified Amazon Connect instance.

" + }, "ListBots":{ "name":"ListBots", "http":{ @@ -2465,7 +2500,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Lists predefined attributes for the specified Amazon Connect instance.

" + "documentation":"

Lists predefined attributes for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

" }, "ListPrompts":{ "name":"ListPrompts", @@ -2653,7 +2688,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Lists the permissions granted to a security profile.

" + "documentation":"

Lists the permissions granted to a security profile.

For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" }, "ListSecurityProfiles":{ "name":"ListSecurityProfiles", @@ -2670,7 +2705,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides summary information about the security profiles for the specified Amazon Connect instance.

For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide.

" + "documentation":"

Provides summary information about the security profiles for the specified Amazon Connect instance.

For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -2915,7 +2950,7 @@ {"shape":"IdempotencyException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You can call this API only in the Amazon Web Services Region where the number was claimed.

To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the Amazon Connect admin website.

After releasing a phone number, the phone number enters into a cooldown period of 30 days. It cannot be searched for or claimed again until the period has ended. If you accidentally release a phone number, contact Amazon Web Services Support.

If you plan to claim and release numbers frequently during a 30 day period, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until 30 days past the oldest number released has expired.

By default you can claim and release up to 200% of your maximum number of active phone numbers during any 30 day period. If you claim and release phone numbers using the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 30 days past the oldest number released has expired.

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket.

" + "documentation":"

Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You can call this API only in the Amazon Web Services Region where the number was claimed.

To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the Amazon Connect admin website.

After releasing a phone number, the phone number enters into a cooldown period for up to 180 days. It cannot be searched for or claimed again until the period has ended. If you accidentally release a phone number, contact Amazon Web Services Support.

If you plan to claim and release numbers frequently, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired.

By default you can claim and release up to 200% of your maximum number of active phone numbers. If you claim and release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 180 days past the oldest number released has expired.

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket.

" }, "ReplicateInstance":{ "name":"ReplicateInstance", @@ -2971,6 +3006,23 @@ ], "documentation":"

When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording whatever recording is selected in the flow configuration: call, screen, or both. If only call recording or only screen recording is enabled, then it would resume.

Voice and screen recordings are supported.

" }, + "SearchAgentStatuses":{ + "name":"SearchAgentStatuses", + "http":{ + "method":"POST", + "requestUri":"/search-agent-statuses" + }, + "input":{"shape":"SearchAgentStatusesRequest"}, + "output":{"shape":"SearchAgentStatusesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Searches AgentStatuses in an Amazon Connect instance, with optional filtering.

" + }, "SearchAvailablePhoneNumbers":{ "name":"SearchAvailablePhoneNumbers", "http":{ @@ -3070,7 +3122,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Predefined attributes that meet certain criteria.

" + "documentation":"

Searches predefined attributes that meet certain criteria. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

" }, "SearchPrompts":{ "name":"SearchPrompts", @@ -3173,7 +3225,24 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Searches security profiles in an Amazon Connect instance, with optional filtering.

" + "documentation":"

Searches security profiles in an Amazon Connect instance, with optional filtering.

For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" + }, + "SearchUserHierarchyGroups":{ + "name":"SearchUserHierarchyGroups", + "http":{ + "method":"POST", + "requestUri":"/search-user-hierarchy-groups" + }, + "input":{"shape":"SearchUserHierarchyGroupsRequest"}, + "output":{"shape":"SearchUserHierarchyGroupsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering.

The UserHierarchyGroup with \"LevelId\": \"0\" is the foundation for building levels on top of an instance. It is not user-definable, nor is it visible in the UI.

" }, "SearchUsers":{ "name":"SearchUsers", @@ -3241,7 +3310,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Provides a pre-signed Amazon S3 URL in response for uploading your content.

You may only use this API to upload attachments to a Connect Case.

" + "documentation":"

Provides a pre-signed Amazon S3 URL in response for uploading your content.

You may only use this API to upload attachments to an Amazon Connect Case.

" }, "StartChatContact":{ "name":"StartChatContact", @@ -3258,7 +3327,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Initiates a flow to start a new chat for the customer. Response of this API provides a token required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.

When a new chat contact is successfully created, clients must subscribe to the participant’s connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.

A 429 error occurs in the following situations:

  • API rate limit is exceeded. API TPS throttling returns a TooManyRequests exception.

  • The quota for concurrent active chats is exceeded. Active chat throttling returns a LimitExceededException.

If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web Services Support.

For more information about chat, see Chat in the Amazon Connect Administrator Guide.

" + "documentation":"

Initiates a flow to start a new chat for the customer. Response of this API provides a token required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.

When a new chat contact is successfully created, clients must subscribe to the participant’s connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.

A 429 error occurs in the following situations:

  • API rate limit is exceeded. API TPS throttling returns a TooManyRequests exception.

  • The quota for concurrent active chats is exceeded. Active chat throttling returns a LimitExceededException.

If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web Services Support.

For more information about chat, see the following topics in the Amazon Connect Administrator Guide:

" }, "StartContactEvaluation":{ "name":"StartContactEvaluation", @@ -3310,7 +3379,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Initiates real-time message streaming for a new chat contact.

For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

" + "documentation":"

Initiates real-time message streaming for a new chat contact.

For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

For more information about chat, see the following topics in the Amazon Connect Administrator Guide:

" }, "StartOutboundVoiceContact":{ "name":"StartOutboundVoiceContact", @@ -3381,7 +3450,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Ends the specified contact. Use this API to stop queued callbacks. It does not work for voice contacts that use the following initiation methods:

  • DISCONNECT

  • TRANSFER

  • QUEUE_TRANSFER

Chat and task contacts can be terminated in any state, regardless of initiation method.

" + "documentation":"

Ends the specified contact. Use this API to stop queued callbacks. It does not work for voice contacts that use the following initiation methods:

  • DISCONNECT

  • TRANSFER

  • QUEUE_TRANSFER

  • EXTERNAL_OUTBOUND

  • MONITOR

Chat and task contacts can be terminated in any state, regardless of initiation method.

" }, "StopContactRecording":{ "name":"StopContactRecording", @@ -3551,6 +3620,22 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates agent status.

" }, + "UpdateAuthenticationProfile":{ + "name":"UpdateAuthenticationProfile", + "http":{ + "method":"POST", + "requestUri":"/authentication-profiles/{InstanceId}/{AuthenticationProfileId}" + }, + "input":{"shape":"UpdateAuthenticationProfileRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change. To request access to this API, contact Amazon Web Services Support.

Updates the selected authentication profile.

" + }, "UpdateContact":{ "name":"UpdateContact", "http":{ @@ -3708,7 +3793,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position in the queue. For example, you can move a contact to the back of the queue by setting a lower routing priority relative to other contacts in queue; or you can move a contact to the front of the queue by increasing the routing age which will make the contact look artificially older and therefore higher up in the first-in-first-out routing order. Note that adjusting the routing age of a contact affects only its position in queue, and not its actual queue wait time as reported through metrics. These properties can also be updated by using the Set routing priority / age flow block.

" + "documentation":"

Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position in the queue. For example, you can move a contact to the back of the queue by setting a lower routing priority relative to other contacts in queue; or you can move a contact to the front of the queue by increasing the routing age which will make the contact look artificially older and therefore higher up in the first-in-first-out routing order. Note that adjusting the routing age of a contact affects only its position in queue, and not its actual queue wait time as reported through metrics. These properties can also be updated by using the Set routing priority / age flow block.

Either QueuePriority or QueueTimeAdjustmentInSeconds should be provided within the request body, but not both.

" }, "UpdateContactSchedule":{ "name":"UpdateContactSchedule", @@ -3866,7 +3951,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates a predefined attribute for the specified Amazon Connect instance.

" + "documentation":"

Updates a predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

" }, "UpdatePrompt":{ "name":"UpdatePrompt", @@ -4110,7 +4195,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates a security profile.

" + "documentation":"

Updates a security profile.

For information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface name of the security profile permissions, see List of security profile permissions.

" }, "UpdateTaskTemplate":{ "name":"UpdateTaskTemplate", @@ -4330,6 +4415,12 @@ "error":{"httpStatusCode":403}, "exception":true }, + "AccessTokenDuration":{ + "type":"integer", + "box":true, + "max":60, + "min":10 + }, "ActionSummaries":{ "type":"list", "member":{"shape":"ActionSummary"} @@ -4469,6 +4560,16 @@ "min":1, "sensitive":true }, + "AgentHierarchyGroup":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the group.

" + } + }, + "documentation":"

Information about an agent hierarchy group.

" + }, "AgentHierarchyGroups":{ "type":"structure", "members":{ @@ -4495,6 +4596,14 @@ }, "documentation":"

A structure that defines search criteria for contacts using agent hierarchy group levels. For more information about agent hierarchies, see Set Up Agent Hierarchies in the Amazon Connect Administrator Guide.

" }, + "AgentId":{ + "type":"string", + "max":256 + }, + "AgentIds":{ + "type":"list", + "member":{"shape":"AgentId"} + }, "AgentInfo":{ "type":"structure", "members":{ @@ -4509,7 +4618,16 @@ "AgentPauseDurationInSeconds":{ "shape":"AgentPauseDurationInSeconds", "documentation":"

Agent pause duration for a contact in seconds.

" - } + }, + "HierarchyGroups":{ + "shape":"HierarchyGroups", + "documentation":"

The agent hierarchy groups for the agent.

" + }, + "DeviceInfo":{ + "shape":"DeviceInfo", + "documentation":"

Information regarding the agent's device.

" + }, + "Capabilities":{"shape":"ParticipantCapabilities"} }, "documentation":"

Information about the agent who accepted the contact.

" }, @@ -4523,6 +4641,16 @@ "type":"integer", "min":0 }, + "AgentQualityMetrics":{ + "type":"structure", + "members":{ + "Audio":{ + "shape":"AudioQualityMetricsInfo", + "documentation":"

Information about the audio quality of the agent.

" + } + }, + "documentation":"

Information about the quality of the agent's media connection.

" + }, "AgentResourceId":{ "type":"string", "max":256, @@ -4586,6 +4714,10 @@ "min":1 }, "AgentStatusId":{"type":"string"}, + "AgentStatusList":{ + "type":"list", + "member":{"shape":"AgentStatus"} + }, "AgentStatusName":{ "type":"string", "max":127, @@ -4614,6 +4746,38 @@ }, "documentation":"

Information about the agent's status.

" }, + "AgentStatusSearchConditionList":{ + "type":"list", + "member":{"shape":"AgentStatusSearchCriteria"} + }, + "AgentStatusSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"AgentStatusSearchConditionList", + "documentation":"

A list of conditions which would be applied together with an OR condition.

" + }, + "AndConditions":{ + "shape":"AgentStatusSearchConditionList", + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name,

 description, state, type, displayOrder,
 and resourceID.

" + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name,

 description, state, type, displayOrder,
 and resourceID.

" + } + }, + "documentation":"

The search criteria to be used to return agent statuses.

" + }, + "AgentStatusSearchFilter":{ + "type":"structure", + "members":{ + "AttributeFilter":{ + "shape":"ControlPlaneAttributeFilter", + "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

  • The top level list specifies conditions that need to be applied with OR operator.

  • The inner list specifies conditions that need to be applied with AND operator.

" + } + }, + "documentation":"

Filters to be applied to search results.

" + }, "AgentStatusState":{ "type":"string", "enum":[ @@ -4673,6 +4837,16 @@ "max":100, "min":1 }, + "AgentsCriteria":{ + "type":"structure", + "members":{ + "AgentIds":{ + "shape":"AgentIds", + "documentation":"

An object to specify a list of agents, by user ID.

" + } + }, + "documentation":"

Can be used to define a list of preferred agents to target the contact to within the queue.
 Note that agents must have the queue in their routing profile in order to be offered the
 contact.

" + }, "AgentsMinOneMaxHundred":{ "type":"list", "member":{"shape":"UserId"}, @@ -4688,7 +4862,7 @@ "type":"map", "key":{"shape":"SecurityProfilePolicyKey"}, "value":{"shape":"SecurityProfilePolicyValue"}, - "max":2 + "max":4 }, "AllowedCapabilities":{ "type":"structure", @@ -4749,6 +4923,25 @@ }, "documentation":"

Configuration of the answering machine detection.

" }, + "AnsweringMachineDetectionStatus":{ + "type":"string", + "enum":[ + "ANSWERED", + "UNDETECTED", + "ERROR", + "HUMAN_ANSWERED", + "SIT_TONE_DETECTED", + "SIT_TONE_BUSY", + "SIT_TONE_INVALID_NUMBER", + "FAX_MACHINE_DETECTED", + "VOICEMAIL_BEEP", + "VOICEMAIL_NO_BEEP", + "AMD_UNRESOLVED", + "AMD_UNANSWERED", + "AMD_ERROR", + "AMD_NOT_APPLICABLE" + ] + }, "Application":{ "type":"structure", "members":{ @@ -5168,6 +5361,10 @@ } } }, + "AssociatedQueueIdList":{ + "type":"list", + "member":{"shape":"QueueId"} + }, "AssociationId":{ "type":"string", "max":100, @@ -5325,6 +5522,32 @@ }, "documentation":"

A list of conditions which would be applied together with an AND condition.

" }, + "AttributeCondition":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"PredefinedAttributeName", + "documentation":"

The name of predefined attribute.

" + }, + "Value":{ + "shape":"ProficiencyValue", + "documentation":"

The value of predefined attribute.

" + }, + "ProficiencyLevel":{ + "shape":"NullableProficiencyLevel", + "documentation":"

The proficiency level of the condition.

" + }, + "MatchCriteria":{ + "shape":"MatchCriteria", + "documentation":"

An object to define AgentsCriteria.

" + }, + "ComparisonOperator":{ + "shape":"ComparisonOperator", + "documentation":"

The operator of the condition.

" + } + }, + "documentation":"

An object to specify the predefined attribute condition.

" + }, "AttributeName":{ "type":"string", "max":32767, @@ -5358,6 +5581,124 @@ }, "documentation":"

Has audio-specific configurations as the operating parameter for Echo Reduction.

" }, + "AudioQualityMetricsInfo":{ + "type":"structure", + "members":{ + "QualityScore":{ + "shape":"AudioQualityScore", + "documentation":"

Number measuring the estimated quality of the media connection.

" + }, + "PotentialQualityIssues":{ + "shape":"PotentialAudioQualityIssues", + "documentation":"

List of potential issues causing degradation of quality on a media connection. If the service did not detect any potential quality issues the list is empty.

Valid values: HighPacketLoss | HighRoundTripTime | HighJitterBuffer

" + } + }, + "documentation":"

Contains information for score and potential quality issues for Audio

" + }, + "AudioQualityScore":{"type":"float"}, + "AuthenticationProfile":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AuthenticationProfileId", + "documentation":"

A unique identifier for the authentication profile.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the authentication profile.

" + }, + "Name":{ + "shape":"AuthenticationProfileName", + "documentation":"

The name for the authentication profile.

" + }, + "Description":{ + "shape":"AuthenticationProfileDescription", + "documentation":"

The description for the authentication profile.

" + }, + "AllowedIps":{ + "shape":"IpCidrList", + "documentation":"

A list of IP address range strings that are allowed to access the Amazon Connect instance. For more information about how to configure IP addresses, see Configure IP address based access control in the Amazon Connect Administrator Guide.

" + }, + "BlockedIps":{ + "shape":"IpCidrList", + "documentation":"

A list of IP address range strings that are blocked from accessing the Amazon Connect instance. For more information about how to configure IP addresses, see Configure IP address based access control in the Amazon Connect Administrator Guide.

" + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

Shows whether the authentication profile is the default authentication profile for the Amazon Connect instance. The default authentication profile applies to all agents in an Amazon Connect instance, unless overridden by another authentication profile.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the authentication profile was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the authentication profile was last modified.

" + }, + "LastModifiedRegion":{ + "shape":"RegionName", + "documentation":"

The Amazon Web Services Region where the authentication profile was last modified.

" + }, + "PeriodicSessionDuration":{ + "shape":"AccessTokenDuration", + "documentation":"

The short lived session duration configuration for users logged in to Amazon Connect, in minutes. This value determines the maximum possible time before an agent is authenticated. For more information, see Configure the session duration in the Amazon Connect Administrator Guide.

" + }, + "MaxSessionDuration":{ + "shape":"RefreshTokenDuration", + "documentation":"

The long lived session duration for users logged in to Amazon Connect, in minutes. After this time period, users must log in again. For more information, see Configure the session duration in the Amazon Connect Administrator Guide.

" + } + }, + "documentation":"

This API is in preview release for Amazon Connect and is subject to change. To request access to this API, contact Amazon Web Services Support.

Information about an authentication profile. An authentication profile is a resource that stores the authentication settings for users in your contact center. You use authentication profiles to set up IP address range restrictions and session timeouts. For more information, see Set IP address restrictions or session timeouts.

" + }, + "AuthenticationProfileDescription":{ + "type":"string", + "max":250, + "min":1 + }, + "AuthenticationProfileId":{ + "type":"string", + "max":100, + "min":1 + }, + "AuthenticationProfileName":{ + "type":"string", + "max":128, + "min":1 + }, + "AuthenticationProfileSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AuthenticationProfileId", + "documentation":"

The unique identifier of the authentication profile.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the authentication profile summary.

" + }, + "Name":{ + "shape":"AuthenticationProfileName", + "documentation":"

The name of the authentication profile summary.

" + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

Shows whether the authentication profile is the default authentication profile for the Amazon Connect instance. The default authentication profile applies to all agents in an Amazon Connect instance, unless overridden by another authentication profile.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the authentication profile summary was last modified.

" + }, + "LastModifiedRegion":{ + "shape":"RegionName", + "documentation":"

The Amazon Web Services Region where the authentication profile summary was last modified.

" + } + }, + "documentation":"

This API is in preview release for Amazon Connect and is subject to change. To request access to this API, contact Amazon Web Services Support.

A summary of a given authentication profile.

" + }, + "AuthenticationProfileSummaryList":{ + "type":"list", + "member":{"shape":"AuthenticationProfileSummary"} + }, "AutoAccept":{"type":"boolean"}, "AvailableNumberSummary":{ "type":"structure", @@ -5807,6 +6148,20 @@ "type":"string", "max":500 }, + "CommonAttributeAndCondition":{ + "type":"structure", + "members":{ + "TagConditions":{ + "shape":"TagAndConditionList", + "documentation":"

A leaf node condition which can be used to specify a tag condition.

" + } + }, + "documentation":"

A list of conditions which would be applied together with an AND condition.

" + }, + "CommonAttributeOrConditionList":{ + "type":"list", + "member":{"shape":"CommonAttributeAndCondition"} + }, "CommonNameLength127":{ "type":"string", "max":127, @@ -5816,6 +6171,11 @@ "type":"string", "enum":["LT"] }, + "ComparisonOperator":{ + "type":"string", + "max":127, + "min":1 + }, "CompleteAttachedFileUploadRequest":{ "type":"structure", "required":[ @@ -5826,7 +6186,7 @@ "members":{ "InstanceId":{ "shape":"InstanceId", - "documentation":"

The unique identifier of the Connect instance.

", + "documentation":"

The unique identifier of the Amazon Connect instance.

", "location":"uri", "locationName":"InstanceId" }, @@ -5856,6 +6216,24 @@ "max":10, "min":1 }, + "Condition":{ + "type":"structure", + "members":{ + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name and
 value.

" + }, + "NumberCondition":{ + "shape":"NumberCondition", + "documentation":"

A leaf node condition which can be used to specify a numeric condition.

" + } + }, + "documentation":"

A leaf node condition which can be used to specify a ProficiencyName, ProficiencyValue and ProficiencyLimit.

" + }, + "Conditions":{ + "type":"list", + "member":{"shape":"Condition"} + }, "ConflictException":{ "type":"structure", "members":{ @@ -5973,6 +6351,39 @@ "Tags":{ "shape":"ContactTagMap", "documentation":"

Tags associated with the contact. This contains both Amazon Web Services generated and user-defined tags.

" + }, + "ConnectedToSystemTimestamp":{ + "shape":"timestamp", + "documentation":"

The timestamp when customer endpoint connected to Amazon Connect.

" + }, + "RoutingCriteria":{ + "shape":"RoutingCriteria", + "documentation":"

Latest routing criteria on the contact.

" + }, + "Customer":{ + "shape":"Customer", + "documentation":"

Information about the Customer on the contact.

" + }, + "Campaign":{"shape":"Campaign"}, + "AnsweringMachineDetectionStatus":{ + "shape":"AnsweringMachineDetectionStatus", + "documentation":"

Indicates how an outbound campaign call is actually disposed if the contact is connected to Amazon Connect.

" + }, + "CustomerVoiceActivity":{ + "shape":"CustomerVoiceActivity", + "documentation":"

Information about customer’s voice activity.

" + }, + "QualityMetrics":{ + "shape":"QualityMetrics", + "documentation":"

Information about the quality of the participant's media connection.

" + }, + "DisconnectDetails":{ + "shape":"DisconnectDetails", + "documentation":"

Information about the call disconnect experience.

" + }, + "SegmentAttributes":{ + "shape":"SegmentAttributes", + "documentation":"

A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes and can be accessed in flows. Attribute keys can include only alphanumeric, -, and _ characters. This field can be used to show channel subtype. For example, connect:Guide or connect:SMS.

" } }, "documentation":"

Contains information about a contact.

" @@ -6503,6 +6914,21 @@ "max":255, "min":1 }, + "ControlPlaneAttributeFilter":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"CommonAttributeOrConditionList", + "documentation":"

A list of conditions which would be applied together with an OR condition.

" + }, + "AndCondition":{ + "shape":"CommonAttributeAndCondition", + "documentation":"

A list of conditions which would be applied together with an AND condition.

" + }, + "TagCondition":{"shape":"TagCondition"} + }, + "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

  • The top level list specifies conditions that need to be applied with OR operator.

  • The inner list specifies conditions that need to be applied with AND operator.

" + }, "ControlPlaneTagFilter":{ "type":"structure", "members":{ @@ -7910,6 +8336,41 @@ "type":"list", "member":{"shape":"CurrentMetric"} }, + "Customer":{ + "type":"structure", + "members":{ + "DeviceInfo":{ + "shape":"DeviceInfo", + "documentation":"

Information regarding Customer’s device.

" + }, + "Capabilities":{"shape":"ParticipantCapabilities"} + }, + "documentation":"

Information about the Customer on the contact.

" + }, + "CustomerQualityMetrics":{ + "type":"structure", + "members":{ + "Audio":{ + "shape":"AudioQualityMetricsInfo", + "documentation":"

Information about the audio quality of the Customer

" + } + }, + "documentation":"

Information about the quality of the Customer's media connection

" + }, + "CustomerVoiceActivity":{ + "type":"structure", + "members":{ + "GreetingStartTimestamp":{ + "shape":"timestamp", + "documentation":"

Timestamp that measures the beginning of the customer greeting from an outbound voice call.

" + }, + "GreetingEndTimestamp":{ + "shape":"timestamp", + "documentation":"

Timestamp that measures the end of the customer greeting from an outbound voice call.

" + } + }, + "documentation":"

Information about customer’s voice activity.

" + }, "DataSetId":{ "type":"string", "max":255, @@ -8600,6 +9061,36 @@ } } }, + "DescribeAuthenticationProfileRequest":{ + "type":"structure", + "required":[ + "AuthenticationProfileId", + "InstanceId" + ], + "members":{ + "AuthenticationProfileId":{ + "shape":"AuthenticationProfileId", + "documentation":"

A unique identifier for the authentication profile.

", + "location":"uri", + "locationName":"AuthenticationProfileId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DescribeAuthenticationProfileResponse":{ + "type":"structure", + "members":{ + "AuthenticationProfile":{ + "shape":"AuthenticationProfile", + "documentation":"

The authentication profile object being described.

" + } + } + }, "DescribeContactEvaluationRequest":{ "type":"structure", "required":[ @@ -9308,6 +9799,24 @@ "error":{"httpStatusCode":403}, "exception":true }, + "DeviceInfo":{ + "type":"structure", + "members":{ + "PlatformName":{ + "shape":"PlatformName", + "documentation":"

Name of the platform that the participant used for the call.

" + }, + "PlatformVersion":{ + "shape":"PlatformVersion", + "documentation":"

Version of the platform that the participant used for the call.

" + }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Operating system that the participant used for the call.

" + } + }, + "documentation":"

Information regarding the device.

" + }, "Dimensions":{ "type":"structure", "members":{ @@ -9680,6 +10189,16 @@ } } }, + "DisconnectDetails":{ + "type":"structure", + "members":{ + "PotentialDisconnectIssue":{ + "shape":"PotentialDisconnectIssue", + "documentation":"

Indicates the potential disconnection issues for a call. This field is not populated if the service does not detect potential issues.

" + } + }, + "documentation":"

Information about the call disconnect experience.

" + }, "DisconnectReason":{ "type":"structure", "members":{ @@ -9773,6 +10292,7 @@ "error":{"httpStatusCode":409}, "exception":true }, + "DurationInSeconds":{"type":"integer"}, "Email":{ "type":"string", "sensitive":true @@ -10756,6 +11276,42 @@ "OnCaseUpdate" ] }, + "Expiry":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

The number of seconds to wait before expiring the routing step.

" + }, + "ExpiryTimestamp":{ + "shape":"timestamp", + "documentation":"

The timestamp indicating when the routing step expires.

" + } + }, + "documentation":"

An object to specify the expiration of a routing step.

" + }, + "Expression":{ + "type":"structure", + "members":{ + "AttributeCondition":{ + "shape":"AttributeCondition", + "documentation":"

An object to specify the predefined attribute condition.

" + }, + "AndExpression":{ + "shape":"Expressions", + "documentation":"

List of routing expressions which will be AND-ed together.

" + }, + "OrExpression":{ + "shape":"Expressions", + "documentation":"

List of routing expressions which will be OR-ed together.

" + } + }, + "documentation":"

A tagged union to specify expression for a routing step.

" + }, + "Expressions":{ + "type":"list", + "member":{"shape":"Expression"} + }, "FailedRequest":{ "type":"structure", "members":{ @@ -10830,7 +11386,7 @@ }, "DoubleValue":{ "shape":"Double", - "documentation":"

a Double number value type.

" + "documentation":"

A Double number value type.

" }, "EmptyValue":{ "shape":"EmptyFieldValue", @@ -11354,15 +11910,15 @@ }, "Filters":{ "shape":"FiltersV2List", - "documentation":"

The filters to apply to returned metrics. You can filter on the following resources:

  • Queues

  • Routing profiles

  • Agents

  • Channels

  • User hierarchy groups

  • Feature

  • Routing step expression

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.

Note the following limits:

  • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | CASE_TEMPLATE_ARN | CASE_STATUS | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION | Q_CONNECT_ENABLED

  • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.

    contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.

    connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.

    ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.

    Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key.

    • TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

    • FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow

    This filter is available only for contact record-driven metrics.

" + "documentation":"

The filters to apply to returned metrics. You can filter on the following resources:

  • Agents

  • Campaigns

  • Channels

  • Feature

  • Queues

  • Routing profiles

  • Routing step expression

  • User hierarchy groups

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.

Note the following limits:

  • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED |

  • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.

    contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.

    connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.

    ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.

    Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key.

    • TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

    • FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow

    This filter is available only for contact record-driven metrics.

    Campaign ARNs are valid filterValues for the CAMPAIGN filter key.

" }, "Groupings":{ "shape":"GroupingsV2", - "documentation":"

The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

If no grouping is specified, a summary of all metrics is returned.

Valid grouping keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION | Q_CONNECT_ENABLED

" + "documentation":"

The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

If no grouping is specified, a summary of all metrics is returned.

Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION

" }, "Metrics":{ "shape":"MetricsV2", - "documentation":"

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

ABANDONMENT_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Abandonment rate

AGENT_ADHERENT_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Adherent time

AGENT_ANSWER_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent answer rate

AGENT_NON_ADHERENT_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Non-adherent time

AGENT_NON_RESPONSE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent non-response

AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

UI name: Agent non-response without customer abandons

AGENT_OCCUPANCY

Unit: Percentage

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Occupancy

AGENT_SCHEDULE_ADHERENCE

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Adherence

AGENT_SCHEDULED_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Scheduled time

AVG_ABANDON_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average queue abandon time

AVG_ACTIVE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Average active time

AVG_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average after contact work time

Feature is a valid filter but not a valid grouping.

AVG_AGENT_CONNECTING_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Average agent API connecting time

The Negate key in Metric Level Filters is not applicable for this metric.

AVG_AGENT_PAUSE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Average agent pause time

AVG_CASE_RELATED_CONTACTS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Average contacts per case

AVG_CASE_RESOLUTION_TIME

Unit: Seconds

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Average case resolution time

AVG_CONTACT_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average contact duration

Feature is a valid filter but not a valid grouping.

AVG_CONVERSATION_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average conversation duration

AVG_GREETING_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent greeting time

AVG_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

UI name: Average handle time

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer hold time

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME_ALL_CONTACTS

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer hold time all contacts

AVG_HOLDS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average holds

Feature is a valid filter but not a valid grouping.

AVG_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interaction and customer hold time

AVG_INTERACTION_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interaction time

Feature is a valid filter but not a valid grouping.

AVG_INTERRUPTIONS_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interruptions

AVG_INTERRUPTION_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interruption time

AVG_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average non-talk time

AVG_QUEUE_ANSWER_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average queue answer time

Feature is a valid filter but not a valid grouping.

AVG_RESOLUTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average resolution time

AVG_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average talk time

AVG_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent talk time

AVG_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer talk time

CASES_CREATED

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases created

CONTACTS_ABANDONED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

UI name: Contact abandoned

CONTACTS_CREATED

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts created

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED

Unit: Count

Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

UI name: API contacts handled

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts handled (connected to agent timestamp)

CONTACTS_HOLD_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts hold disconnect

CONTACTS_ON_HOLD_AGENT_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts hold agent disconnect

CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts hold customer disconnect

CONTACTS_PUT_ON_HOLD

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts put on hold

CONTACTS_TRANSFERRED_OUT_EXTERNAL

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts transferred out external

CONTACTS_TRANSFERRED_OUT_INTERNAL

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts transferred out internal

CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts queued

CONTACTS_QUEUED_BY_ENQUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

UI name: Contacts queued (enqueue timestamp)

CONTACTS_RESOLVED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts resolved in X

CONTACTS_TRANSFERRED_OUT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out

Feature is a valid filter but not a valid grouping.

CONTACTS_TRANSFERRED_OUT_BY_AGENT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out by agent

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out queue

CURRENT_CASES

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Current cases

MAX_QUEUED_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Maximum queued time

PERCENT_CASES_FIRST_CONTACT_RESOLVED

Unit: Percent

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases resolved on first contact

PERCENT_CONTACTS_STEP_EXPIRED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

UI name: Not available

PERCENT_CONTACTS_STEP_JOINED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

UI name: Not available

PERCENT_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Non-talk time percent

PERCENT_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Talk time percent

PERCENT_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Agent talk time percent

PERCENT_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Customer talk time percent

REOPENED_CASE_ACTIONS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases reopened

RESOLVED_CASE_ACTIONS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases resolved

SERVICE_LEVEL

You can include up to 20 SERVICE_LEVEL metrics in a request.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Service level X

STEP_CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, RoutingStepExpression

UI name: Not available

SUM_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: After contact work time

SUM_CONNECTING_TIME_AGENT

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent API connecting time

The Negate key in Metric Level Filters is not applicable for this metric.

SUM_CONTACT_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contact flow time

SUM_CONTACT_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent on contact time

SUM_CONTACTS_ANSWERED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts answered in X seconds

SUM_CONTACTS_ABANDONED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts abandoned in X seconds

SUM_CONTACTS_DISCONNECTED

Valid metric filter key: DISCONNECT_REASON

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contact disconnected

SUM_ERROR_STATUS_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Error status time

SUM_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contact handle time

SUM_HOLD_TIME

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Customer hold time

SUM_IDLE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Agent idle time

SUM_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Agent interaction and hold time

SUM_INTERACTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent interaction time

SUM_NON_PRODUCTIVE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Non-Productive Time

SUM_ONLINE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Online time

SUM_RETRY_CALLBACK_ATTEMPTS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Callback attempts

" + "documentation":"

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide.

ABANDONMENT_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Abandonment rate

AGENT_ADHERENT_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Adherent time

AGENT_ANSWER_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent answer rate

AGENT_NON_ADHERENT_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Non-adherent time

AGENT_NON_RESPONSE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent non-response

AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

UI name: Agent non-response without customer abandons

AGENT_OCCUPANCY

Unit: Percentage

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Occupancy

AGENT_SCHEDULE_ADHERENCE

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Adherence

AGENT_SCHEDULED_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Scheduled time

AVG_ABANDON_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average queue abandon time

AVG_ACTIVE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Average active time

AVG_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average after contact work time

Feature is a valid filter but not a valid grouping.

AVG_AGENT_CONNECTING_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Average agent API connecting time

The Negate key in Metric Level Filters is not applicable for this metric.

AVG_AGENT_PAUSE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Average agent pause time

AVG_CASE_RELATED_CONTACTS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Average contacts per case

AVG_CASE_RESOLUTION_TIME

Unit: Seconds

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Average case resolution time

AVG_CONTACT_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average contact duration

Feature is a valid filter but not a valid grouping.

AVG_CONVERSATION_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average conversation duration

AVG_DIALS_PER_MINUTE

This metric is available only for contacts analyzed by outbound campaigns analytics.

Unit: Count

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile

UI name: Average dials per minute

AVG_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

UI name: Average flow time

AVG_GREETING_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent greeting time

AVG_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

UI name: Average handle time

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer hold time

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME_ALL_CONTACTS

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer hold time all contacts

AVG_HOLDS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average holds

Feature is a valid filter but not a valid grouping.

AVG_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interaction and customer hold time

AVG_INTERACTION_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interaction time

Feature is a valid filter but not a valid grouping.

AVG_INTERRUPTIONS_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interruptions

AVG_INTERRUPTION_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent interruption time

AVG_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average non-talk time

AVG_QUEUE_ANSWER_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average queue answer time

Feature is a valid filter but not a valid grouping.

AVG_RESOLUTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average resolution time

AVG_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average talk time

AVG_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average agent talk time

AVG_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Average customer talk time

AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION

This metric is available only for contacts analyzed by outbound campaigns analytics.

Unit: Seconds

Valid groupings and filters: Campaign

UI name: Average wait time after customer connection

CAMPAIGN_CONTACTS_ABANDONED_AFTER_X

This metric is available only for contacts analyzed by outbound campaigns analytics.

Unit: Count

Valid groupings and filters: Campaign, Agent

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

UI name: Campaign contacts abandoned after X

CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE

This metric is available only for contacts analyzed by outbound campaigns analytics.

Unit: Percent

Valid groupings and filters: Campaign, Agent

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

UI name: Campaign contacts abandoned after X rate

CASES_CREATED

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases created

CONTACTS_CREATED

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts created

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED

Unit: Count

Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

UI name: API contacts handled

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts handled (connected to agent timestamp)

CONTACTS_HOLD_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts hold disconnect

CONTACTS_ON_HOLD_AGENT_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts hold agent disconnect

CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts hold customer disconnect

CONTACTS_PUT_ON_HOLD

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts put on hold

CONTACTS_TRANSFERRED_OUT_EXTERNAL

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts transferred out external

CONTACTS_TRANSFERRED_OUT_INTERNAL

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contacts transferred out internal

CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts queued

CONTACTS_QUEUED_BY_ENQUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

UI name: Contacts queued (enqueue timestamp)

CONTACTS_REMOVED_FROM_QUEUE_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts removed from queue in X seconds

CONTACTS_RESOLVED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts resolved in X

CONTACTS_TRANSFERRED_OUT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out

Feature is a valid filter but not a valid grouping.

CONTACTS_TRANSFERRED_OUT_BY_AGENT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out by agent

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contacts transferred out queue

CURRENT_CASES

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Current cases

DELIVERY_ATTEMPTS

This metric is available only for contacts analyzed by outbound campaigns analytics.

Unit: Count

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason

UI name: Delivery attempts

DELIVERY_ATTEMPT_DISPOSITION_RATE

This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled.

Unit: Percent

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON

Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason

Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings.

UI name: Delivery attempt disposition rate

FLOWS_OUTCOME

Unit: Count

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

UI name: Flows outcome

FLOWS_STARTED

Unit: Count

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp

UI name: Flows started

HUMAN_ANSWERED_CALLS

This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled.

Unit: Count

Valid groupings and filters: Campaign, Agent

UI name: Human answered

MAX_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

UI name: Maximum flow time

MAX_QUEUED_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Maximum queued time

MIN_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

UI name: Minimum flow time

PERCENT_CASES_FIRST_CONTACT_RESOLVED

Unit: Percent

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases resolved on first contact

PERCENT_CONTACTS_STEP_EXPIRED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

PERCENT_CONTACTS_STEP_JOINED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

PERCENT_FLOWS_OUTCOME

Unit: Percent

Valid metric filter key: FLOWS_OUTCOME_TYPE

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

UI name: Flows outcome percentage.

The FLOWS_OUTCOME_TYPE is not a valid grouping.

PERCENT_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Non-talk time percent

PERCENT_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Talk time percent

PERCENT_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Agent talk time percent

PERCENT_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Customer talk time percent

REOPENED_CASE_ACTIONS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases reopened

RESOLVED_CASE_ACTIONS

Unit: Count

Required filter key: CASE_TEMPLATE_ARN

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

UI name: Cases resolved

SERVICE_LEVEL

You can include up to 20 SERVICE_LEVEL metrics in a request.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Service level X

STEP_CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, RoutingStepExpression

UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

SUM_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: After contact work time

SUM_CONNECTING_TIME_AGENT

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent API connecting time

The Negate key in Metric Level Filters is not applicable for this metric.

SUM_CONTACTS_ABANDONED

Unit: Count

Metric filter:

  • Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

UI name: Contact abandoned

SUM_CONTACTS_ABANDONED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts abandoned in X seconds

SUM_CONTACTS_ANSWERED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

UI name: Contacts answered in X seconds

SUM_CONTACT_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contact flow time

SUM_CONTACT_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Agent on contact time

SUM_CONTACTS_DISCONNECTED

Valid metric filter key: DISCONNECT_REASON

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Contact disconnected

SUM_ERROR_STATUS_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Error status time

SUM_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Contact handle time

SUM_HOLD_TIME

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Customer hold time

SUM_IDLE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Agent idle time

SUM_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

UI name: Agent interaction and hold time

SUM_INTERACTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

UI name: Agent interaction time

SUM_NON_PRODUCTIVE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Non-Productive Time

SUM_ONLINE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

UI name: Online time

SUM_RETRY_CALLBACK_ATTEMPTS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

UI name: Callback attempts

" }, "NextToken":{ "shape":"NextToken2500", @@ -11683,6 +12239,32 @@ }, "documentation":"

Information about the hierarchy group.

" }, + "HierarchyGroups":{ + "type":"structure", + "members":{ + "Level1":{ + "shape":"AgentHierarchyGroup", + "documentation":"

The group at level one of the agent hierarchy.

" + }, + "Level2":{ + "shape":"AgentHierarchyGroup", + "documentation":"

The group at level two of the agent hierarchy.

" + }, + "Level3":{ + "shape":"AgentHierarchyGroup", + "documentation":"

The group at level three of the agent hierarchy.

" + }, + "Level4":{ + "shape":"AgentHierarchyGroup", + "documentation":"

The group at level four of the agent hierarchy.

" + }, + "Level5":{ + "shape":"AgentHierarchyGroup", + "documentation":"

The group at level five of the agent hierarchy.

" + } + }, + "documentation":"

Information about the agent hierarchy. Hierarchies can be configured with up to five levels.

" + }, "HierarchyLevel":{ "type":"structure", "members":{ @@ -12167,6 +12749,7 @@ } }, "InboundCallsEnabled":{"type":"boolean"}, + "Index":{"type":"integer"}, "InitiationMethodList":{ "type":"list", "member":{"shape":"ContactInitiationMethod"} @@ -12555,6 +13138,16 @@ "type":"list", "member":{"shape":"InvisibleFieldInfo"} }, + "IpCidr":{ + "type":"string", + "max":50, + "min":2, + "pattern":"^[A-Za-z0-9:/]*$" + }, + "IpCidrList":{ + "type":"list", + "member":{"shape":"IpCidr"} + }, "JoinToken":{ "type":"string", "sensitive":true @@ -12809,6 +13402,44 @@ } } }, + "ListAuthenticationProfilesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAuthenticationProfilesResponse":{ + "type":"structure", + "members":{ + "AuthenticationProfileSummaryList":{ + "shape":"AuthenticationProfileSummaryList", + "documentation":"

A summary of a given authentication profile.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, "ListBotsRequest":{ "type":"structure", "required":[ @@ -12856,6 +13487,20 @@ } } }, + "ListCondition":{ + "type":"structure", + "members":{ + "TargetListType":{ + "shape":"TargetListType", + "documentation":"

The type of target list that will be used to filter the users.

" + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

A list of Condition objects which would be applied together with an AND condition.

" + } + }, + "documentation":"

A leaf node condition which can be used to specify a List condition to search users with attributes included in Lists like Proficiencies.

" + }, "ListContactEvaluationsRequest":{ "type":"structure", "required":[ @@ -14652,7 +15297,17 @@ } } }, - "Long":{"type":"long"}, + "Long":{"type":"long"}, + "MatchCriteria":{ + "type":"structure", + "members":{ + "AgentsCriteria":{ + "shape":"AgentsCriteria", + "documentation":"

An object to define agentIds.

" + } + }, + "documentation":"

An object to define AgentsCriteria.

" + }, "MaxResult10":{ "type":"integer", "max":10, @@ -14831,7 +15486,7 @@ "members":{ "MetricFilterKey":{ "shape":"String", - "documentation":"

The key to use for filtering data.

Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON. These are the same values as the InitiationMethod and DisconnectReason in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator's Guide.

" + "documentation":"

The key to use for filtering data.

Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON. These are the same values as the InitiationMethod and DisconnectReason in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator Guide.

" }, "MetricFilterValues":{ "shape":"MetricFilterValueList", @@ -15036,6 +15691,46 @@ }, "documentation":"

The type of notification recipient.

" }, + "NullableProficiencyLevel":{ + "type":"float", + "max":5.0, + "min":1.0 + }, + "NullableProficiencyLimitValue":{"type":"integer"}, + "NumberComparisonType":{ + "type":"string", + "enum":[ + "GREATER_OR_EQUAL", + "GREATER", + "LESSER_OR_EQUAL", + "LESSER", + "EQUAL", + "NOT_EQUAL", + "RANGE" + ] + }, + "NumberCondition":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"String", + "documentation":"

The name of the field in the number condition.

" + }, + "MinValue":{ + "shape":"NullableProficiencyLimitValue", + "documentation":"

The minValue to be used while evaluating the number condition.

" + }, + "MaxValue":{ + "shape":"NullableProficiencyLimitValue", + "documentation":"

The maxValue to be used while evaluating the number condition.

" + }, + "ComparisonType":{ + "shape":"NumberComparisonType", + "documentation":"

The type of comparison to be made when evaluating the number condition.

" + } + }, + "documentation":"

A leaf node condition which can be used to specify a numeric condition.

The currently supported value for FieldName is limit.

" + }, "NumberReference":{ "type":"structure", "members":{ @@ -15074,6 +15769,11 @@ }, "documentation":"

Information about the property value used in automation of a numeric questions. Label values are associated with minimum and maximum values for the numeric question.

  • Sentiment scores have a minimum value of -5 and maximum value of 5.

  • Duration labels, such as NON_TALK_TIME, CONTACT_DURATION, AGENT_INTERACTION_DURATION, CUSTOMER_HOLD_TIME have a minimum value of 0 and maximum value of 28800.

  • Percentages have a minimum value of 0 and maximum value of 100.

  • NUMBER_OF_INTERRUPTIONS has a minimum value of 0 and maximum value of 1000.

" }, + "OperatingSystem":{ + "type":"string", + "max":128, + "min":0 + }, "Origin":{ "type":"string", "max":267 @@ -15676,6 +16376,32 @@ "DESK_PHONE" ] }, + "PlatformName":{ + "type":"string", + "max":128, + "min":0 + }, + "PlatformVersion":{ + "type":"string", + "max":128, + "min":0 + }, + "PotentialAudioQualityIssue":{ + "type":"string", + "max":128, + "min":0 + }, + "PotentialAudioQualityIssues":{ + "type":"list", + "member":{"shape":"PotentialAudioQualityIssue"}, + "max":3, + "min":0 + }, + "PotentialDisconnectIssue":{ + "type":"string", + "max":128, + "min":0 + }, "PredefinedAttribute":{ "type":"structure", "members":{ @@ -15807,6 +16533,11 @@ "max":5.0, "min":1.0 }, + "ProficiencyValue":{ + "type":"string", + "max":128, + "min":1 + }, "Prompt":{ "type":"structure", "members":{ @@ -16004,6 +16735,20 @@ "members":{ } }, + "QualityMetrics":{ + "type":"structure", + "members":{ + "Agent":{ + "shape":"AgentQualityMetrics", + "documentation":"

Information about the quality of Agent media connection.

" + }, + "Customer":{ + "shape":"CustomerQualityMetrics", + "documentation":"

Information about the quality of Customer media connection.

" + } + }, + "documentation":"

Information about the quality of the participant's media connection.

" + }, "Queue":{ "type":"structure", "members":{ @@ -16529,6 +17274,28 @@ "max":5, "min":0 }, + "RealTimeContactAnalysisPostContactSummaryContent":{ + "type":"string", + "max":1270, + "min":1 + }, + "RealTimeContactAnalysisPostContactSummaryFailureCode":{ + "type":"string", + "enum":[ + "QUOTA_EXCEEDED", + "INSUFFICIENT_CONVERSATION_CONTENT", + "FAILED_SAFETY_GUIDELINES", + "INVALID_ANALYSIS_CONFIGURATION", + "INTERNAL_ERROR" + ] + }, + "RealTimeContactAnalysisPostContactSummaryStatus":{ + "type":"string", + "enum":[ + "FAILED", + "COMPLETED" + ] + }, "RealTimeContactAnalysisSegmentAttachments":{ "type":"structure", "required":[ @@ -16623,6 +17390,25 @@ }, "documentation":"

Segment type containing a list of detected issues.

" }, + "RealTimeContactAnalysisSegmentPostContactSummary":{ + "type":"structure", + "required":["Status"], + "members":{ + "Content":{ + "shape":"RealTimeContactAnalysisPostContactSummaryContent", + "documentation":"

The content of the summary.

" + }, + "Status":{ + "shape":"RealTimeContactAnalysisPostContactSummaryStatus", + "documentation":"

Whether the summary was successfully COMPLETED or FAILED to be generated.

" + }, + "FailureCode":{ + "shape":"RealTimeContactAnalysisPostContactSummaryFailureCode", + "documentation":"

If the summary failed to be generated, one of the following failure codes occurs:

  • QUOTA_EXCEEDED: The number of concurrent analytics jobs reached your service quota.

  • INSUFFICIENT_CONVERSATION_CONTENT: The conversation needs to have at least one turn from both the participants in order to generate the summary.

  • FAILED_SAFETY_GUIDELINES: The generated summary cannot be provided because it failed to meet system safety guidelines.

  • INVALID_ANALYSIS_CONFIGURATION: This code occurs when, for example, you're using a language that isn't supported by generative AI-powered post-contact summaries.

  • INTERNAL_ERROR: Internal system error.

" + } + }, + "documentation":"

Information about the post-contact summary for a real-time contact segment.

" + }, "RealTimeContactAnalysisSegmentTranscript":{ "type":"structure", "required":[ @@ -16679,13 +17465,14 @@ "Categories", "Issues", "Event", - "Attachments" + "Attachments", + "PostContactSummary" ] }, "RealTimeContactAnalysisSegmentTypes":{ "type":"list", "member":{"shape":"RealTimeContactAnalysisSegmentType"}, - "max":5 + "max":6 }, "RealTimeContactAnalysisSentimentLabel":{ "type":"string", @@ -16791,6 +17578,10 @@ "Attachments":{ "shape":"RealTimeContactAnalysisSegmentAttachments", "documentation":"

The analyzed attachments.

" + }, + "PostContactSummary":{ + "shape":"RealTimeContactAnalysisSegmentPostContactSummary", + "documentation":"

Information about the post-contact summary.

" } }, "documentation":"

An analyzed segment for a real-time analysis session.

", @@ -16891,6 +17682,12 @@ "max":4096, "min":0 }, + "RefreshTokenDuration":{ + "type":"integer", + "box":true, + "max":720, + "min":360 + }, "RegionName":{ "type":"string", "pattern":"[a-z]{2}(-[a-z]+){1,2}(-[0-9])?" @@ -17118,6 +17915,71 @@ "members":{ } }, + "RoutingCriteria":{ + "type":"structure", + "members":{ + "Steps":{ + "shape":"Steps", + "documentation":"

List of routing steps. When Amazon Connect does not find an available agent meeting the requirements in a step for a given step duration, the routing criteria will move on to the next step sequentially until a join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent in the queue.

" + }, + "ActivationTimestamp":{ + "shape":"timestamp", + "documentation":"

The timestamp indicating when the routing criteria is set to active. A routing criteria is activated when contact is transferred to a queue. ActivationTimestamp will be set on routing criteria for contacts in agent queue even though Routing criteria is never activated for contacts in agent queue.

" + }, + "Index":{ + "shape":"Index", + "documentation":"

Information about the index of the routing criteria.

" + } + }, + "documentation":"

Latest routing criteria on the contact.

" + }, + "RoutingCriteriaInput":{ + "type":"structure", + "members":{ + "Steps":{ + "shape":"RoutingCriteriaInputSteps", + "documentation":"

When Amazon Connect does not find an available agent meeting the requirements in a step for
 a given step duration, the routing criteria will move on to the next step sequentially until a
 join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent in the queue.

" + } + }, + "documentation":"

An object to define the RoutingCriteria.

" + }, + "RoutingCriteriaInputStep":{ + "type":"structure", + "members":{ + "Expiry":{ + "shape":"RoutingCriteriaInputStepExpiry", + "documentation":"

An object to specify the expiration of a routing step.

" + }, + "Expression":{ + "shape":"Expression", + "documentation":"

A tagged union to specify expression for a routing step.

" + } + }, + "documentation":"

Step defines the list of agents to be routed or route based on the agent requirements such as ProficiencyLevel, Name, or Value.

" + }, + "RoutingCriteriaInputStepExpiry":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

The number of seconds that the contact will be routed only to agents matching this routing
 step, if expiry was configured for this routing step.

" + } + }, + "documentation":"

Specify whether this routing criteria step should apply for only a limited amount of time,
 or if it should never expire.

" + }, + "RoutingCriteriaInputSteps":{ + "type":"list", + "member":{"shape":"RoutingCriteriaInputStep"} + }, + "RoutingCriteriaStepStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "JOINED", + "EXPIRED" + ] + }, "RoutingExpression":{ "type":"string", "max":3000, @@ -17186,6 +18048,10 @@ "IsDefault":{ "shape":"Boolean", "documentation":"

Whether this a default routing profile.

" + }, + "AssociatedQueueIds":{ + "shape":"AssociatedQueueIdList", + "documentation":"

The IDs of the associated queues.

" } }, "documentation":"

Contains information about a routing profile.

" @@ -17331,7 +18197,7 @@ }, "StringCondition":{ "shape":"StringCondition", - "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name, description, and resourceID.

" + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are associatedQueueIds, name, description, and resourceID.

" } }, "documentation":"

The search criteria to be used to return routing profiles.

The name and description fields support \"contains\" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will throw invalid results.

" @@ -17602,6 +18468,50 @@ "min":1, "pattern":"s3://\\S+/.+|https://\\\\S+\\\\.s3\\\\.\\\\S+\\\\.amazonaws\\\\.com/\\\\S+" }, + "SearchAgentStatusesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page.

", + "box":true + }, + "SearchFilter":{ + "shape":"AgentStatusSearchFilter", + "documentation":"

Filters to be applied to search results.

" + }, + "SearchCriteria":{ + "shape":"AgentStatusSearchCriteria", + "documentation":"

The search criteria to be used to return agent statuses.

" + } + } + }, + "SearchAgentStatusesResponse":{ + "type":"structure", + "members":{ + "AgentStatuses":{ + "shape":"AgentStatusList", + "documentation":"

The agent statuses that match your search criteria.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

The total number of agent statuses which matched your search query.

" + } + } + }, "SearchAvailablePhoneNumbersRequest":{ "type":"structure", "required":[ @@ -17729,7 +18639,7 @@ "members":{ "ContactFlows":{ "shape":"ContactFlowSearchSummaryList", - "documentation":"

Information about the contact flows.

" + "documentation":"

Information about the flows.

" }, "NextToken":{ "shape":"NextToken2500", @@ -17861,7 +18771,7 @@ }, "SearchableContactAttributes":{ "shape":"SearchableContactAttributes", - "documentation":"

The search criteria based on user-defined contact attributes that have been configured for contact search. For more information, see Search by customer contact attributes in the Amazon Connect Administrator Guide.

To use SearchableContactAttributes in a search request, the GetContactAttributes action is required to perform an API request. For more information, see https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonconnect.html#amazonconnect-actions-as-permissionsActions defined by Amazon Connect.

" + "documentation":"

The search criteria based on user-defined contact attributes that have been configured for contact search. For more information, see Search by custom contact attributes in the Amazon Connect Administrator Guide.

To use SearchableContactAttributes in a search request, the GetContactAttributes action is required to perform an API request. For more information, see https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonconnect.html#amazonconnect-actions-as-permissionsActions defined by Amazon Connect.

" } }, "documentation":"

A structure of search criteria to be used to return contacts.

" @@ -18092,7 +19002,7 @@ }, "ResourceTypes":{ "shape":"ResourceTypeList", - "documentation":"

The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource types.

" + "documentation":"

The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource types.

Supported resource types

  • AGENT

  • ROUTING_PROFILE

  • STANDARD_QUEUE

  • SECURITY_PROFILE

  • OPERATING_HOURS

  • PROMPT

  • CONTACT_FLOW

  • FLOW_MODULE

" }, "NextToken":{ "shape":"NextToken2500", @@ -18221,6 +19131,50 @@ "max":100, "min":0 }, + "SearchUserHierarchyGroupsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page.

", + "box":true + }, + "SearchFilter":{ + "shape":"UserHierarchyGroupSearchFilter", + "documentation":"

Filters to be applied to search results.

" + }, + "SearchCriteria":{ + "shape":"UserHierarchyGroupSearchCriteria", + "documentation":"

The search criteria to be used to return UserHierarchyGroups.

" + } + } + }, + "SearchUserHierarchyGroupsResponse":{ + "type":"structure", + "members":{ + "UserHierarchyGroups":{ + "shape":"UserHierarchyGroupList", + "documentation":"

Information about the userHierarchyGroups.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

The total number of userHierarchyGroups which matched your search query.

" + } + } + }, "SearchUsersRequest":{ "type":"structure", "required":["InstanceId"], @@ -18762,7 +19716,7 @@ "documentation":"

An ascending or descending sort.

" } }, - "documentation":"

A structure that defineds the field name to sort by and a sort order.

" + "documentation":"

A structure that defines the field name to sort by and a sort order.

" }, "SortOrder":{ "type":"string", @@ -18818,7 +19772,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The unique identifier of the Connect instance.

", + "documentation":"

The unique identifier of the Amazon Connect instance.

", "location":"uri", "locationName":"InstanceId" }, @@ -19239,7 +20193,7 @@ }, "ContactFlowId":{ "shape":"ContactFlowId", - "documentation":"

The identifier of the flow for the call. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold:

arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx

" + "documentation":"

The identifier of the flow for the call. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold:

arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx

" }, "InstanceId":{ "shape":"InstanceId", @@ -19293,6 +20247,28 @@ "AVG" ] }, + "Step":{ + "type":"structure", + "members":{ + "Expiry":{ + "shape":"Expiry", + "documentation":"

An object to specify the expiration of a routing step.

" + }, + "Expression":{ + "shape":"Expression", + "documentation":"

A tagged union to specify expression for a routing step.

" + }, + "Status":{ + "shape":"RoutingCriteriaStepStatus", + "documentation":"

Represents status of the Routing step.

" + } + }, + "documentation":"

Step signifies the criteria to be used for routing to an agent

" + }, + "Steps":{ + "type":"list", + "member":{"shape":"Step"} + }, "StopContactRecordingRequest":{ "type":"structure", "required":[ @@ -19412,7 +20388,7 @@ "documentation":"

The type of comparison to be made when evaluating the string condition.

" } }, - "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name and description.

" + "documentation":"

A leaf node condition which can be used to specify a string condition.

" }, "StringReference":{ "type":"structure", @@ -19598,7 +20574,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + "pattern":"^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$" }, "TagKeyList":{ "type":"list", @@ -19698,6 +20674,10 @@ "type":"list", "member":{"shape":"TagSet"} }, + "TargetListType":{ + "type":"string", + "enum":["PROFICIENCIES"] + }, "TaskActionDefinition":{ "type":"structure", "required":[ @@ -19960,7 +20940,7 @@ "members":{ "Comparison":{ "shape":"ResourceArnOrId", - "documentation":"

The type of comparison. Only \"less than\" (LT) comparisons are supported.

" + "documentation":"

The type of comparison. Only \"less than\" (LT) and \"greater than\" (GT) comparisons are supported.

" }, "ThresholdValue":{ "shape":"ThresholdValue", @@ -20339,6 +21319,48 @@ } } }, + "UpdateAuthenticationProfileRequest":{ + "type":"structure", + "required":[ + "AuthenticationProfileId", + "InstanceId" + ], + "members":{ + "AuthenticationProfileId":{ + "shape":"AuthenticationProfileId", + "documentation":"

A unique identifier for the authentication profile.

", + "location":"uri", + "locationName":"AuthenticationProfileId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Name":{ + "shape":"AuthenticationProfileName", + "documentation":"

The name for the authentication profile.

" + }, + "Description":{ + "shape":"AuthenticationProfileDescription", + "documentation":"

The description for the authentication profile.

" + }, + "AllowedIps":{ + "shape":"IpCidrList", + "documentation":"

A list of IP address range strings that are allowed to access the instance. For more information on how to configure IP addresses, see Configure IP-based access control in the Amazon Connect Administrator Guide.

" + }, + "BlockedIps":{ + "shape":"IpCidrList", + "documentation":"

A list of IP address range strings that are blocked from accessing the instance. For more information on how to configure IP addresses, see Configure IP-based access control in the Amazon Connect Administrator Guide.

" + }, + "PeriodicSessionDuration":{ + "shape":"AccessTokenDuration", + "documentation":"

The short lived session duration configuration for users logged in to Amazon Connect, in minutes. This value determines the maximum possible time before an agent is authenticated. For more information, see Configure session timeouts in the Amazon Connect Administrator Guide.

", + "box":true + } + } + }, "UpdateCaseActionDefinition":{ "type":"structure", "required":["Fields"], @@ -20659,6 +21681,10 @@ "QueuePriority":{ "shape":"QueuePriority", "documentation":"

Priority of the contact in the queue. The default priority for new contacts is 5. You can raise the priority of a contact compared to other contacts in the queue by assigning them a higher priority, such as 1 or 2.

" + }, + "RoutingCriteria":{ + "shape":"RoutingCriteriaInput", + "documentation":"

Updates the routing criteria on the contact. These properties can be used to change how a
 contact is routed within the queue.

" } } }, @@ -22075,6 +23101,42 @@ "type":"list", "member":{"shape":"UserData"} }, + "UserHierarchyGroupList":{ + "type":"list", + "member":{"shape":"HierarchyGroup"} + }, + "UserHierarchyGroupSearchConditionList":{ + "type":"list", + "member":{"shape":"UserHierarchyGroupSearchCriteria"} + }, + "UserHierarchyGroupSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"UserHierarchyGroupSearchConditionList", + "documentation":"

A list of conditions which would be applied together with an OR condition.

" + }, + "AndConditions":{ + "shape":"UserHierarchyGroupSearchConditionList", + "documentation":"

A list of conditions which would be applied together with an AND condition.

" + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name,

 parentId, levelId, and resourceID.

" + } + }, + "documentation":"

The search criteria to be used to return userHierarchyGroup.

" + }, + "UserHierarchyGroupSearchFilter":{ + "type":"structure", + "members":{ + "AttributeFilter":{ + "shape":"ControlPlaneAttributeFilter", + "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR or AND (List of List) input where:

  • The top level list specifies conditions that need to be applied with OR operator.

  • The inner list specifies conditions that need to be applied with AND operator.

" + } + }, + "documentation":"

Filters to be applied to search results.

" + }, "UserId":{"type":"string"}, "UserIdList":{ "type":"list", @@ -22252,6 +23314,10 @@ "shape":"StringCondition", "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are Username, FirstName, LastName, RoutingProfileId, SecurityProfileId, ResourceId.

" }, + "ListCondition":{ + "shape":"ListCondition", + "documentation":"

A leaf node condition which can be used to specify a List condition to search users with attributes included in Lists like Proficiencies.

" + }, "HierarchyGroupCondition":{ "shape":"HierarchyGroupCondition", "documentation":"

A leaf node condition which can be used to specify a hierarchy group condition.

" @@ -22789,5 +23855,5 @@ }, "timestamp":{"type":"timestamp"} }, - "documentation":"

Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.

There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

You can connect programmatically to an Amazon Web Services service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

" + "documentation":"

Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.

There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

You can connect programmatically to an Amazon Web Services service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

" } diff --git a/botocore/data/controlcatalog/2018-05-10/paginators-1.json b/botocore/data/controlcatalog/2018-05-10/paginators-1.json index d716b3dcef..1926666e8c 100644 --- a/botocore/data/controlcatalog/2018-05-10/paginators-1.json +++ b/botocore/data/controlcatalog/2018-05-10/paginators-1.json @@ -17,6 +17,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Objectives" + }, + "ListControls": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Controls" } } } diff --git a/botocore/data/controlcatalog/2018-05-10/service-2.json b/botocore/data/controlcatalog/2018-05-10/service-2.json index 9c1a84c3ee..5dc6c918de 100644 --- a/botocore/data/controlcatalog/2018-05-10/service-2.json +++ b/botocore/data/controlcatalog/2018-05-10/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], "endpointPrefix":"controlcatalog", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Control Catalog", "serviceId":"ControlCatalog", "signatureVersion":"v4", @@ -12,6 +13,24 @@ "uid":"controlcatalog-2018-05-10" }, "operations":{ + "GetControl":{ + "name":"GetControl", + "http":{ + "method":"POST", + "requestUri":"/get-control", + "responseCode":200 + }, + "input":{"shape":"GetControlRequest"}, + "output":{"shape":"GetControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns details about a specific control, most notably a list of Amazon Web Services Regions where this control is supported. Input a value for the ControlArn parameter, in ARN form. GetControl accepts controltower or controlcatalog control ARNs as input. Returns a controlcatalog ARN format.

In the API response, controls that have the value GLOBAL in the Scope field do not show the DeployableRegions field, because it does not apply. Controls that have the value REGIONAL in the Scope field return a value for the DeployableRegions field, as shown in the example.

" + }, "ListCommonControls":{ "name":"ListCommonControls", "http":{ @@ -29,6 +48,23 @@ ], "documentation":"

Returns a paginated list of common controls from the Amazon Web Services Control Catalog.

You can apply an optional filter to see common controls that have a specific objective. If you don’t provide a filter, the operation returns all common controls.

" }, + "ListControls":{ + "name":"ListControls", + "http":{ + "method":"POST", + "requestUri":"/list-controls", + "responseCode":200 + }, + "input":{"shape":"ListControlsRequest"}, + "output":{"shape":"ListControlsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

" + }, "ListDomains":{ "name":"ListDomains", "http":{ @@ -109,7 +145,7 @@ "type":"string", "max":2048, "min":41, - "pattern":"^arn:(aws(?:[-a-z]*)?):controlcatalog:::common-control/[0-9a-z]+$" + "pattern":"arn:(aws(?:[-a-z]*)?):controlcatalog:::common-control/[0-9a-z]+" }, "CommonControlFilter":{ "type":"structure", @@ -125,21 +161,21 @@ "type":"structure", "required":[ "Arn", - "CreateTime", + "Name", "Description", "Domain", - "LastUpdateTime", - "Name", - "Objective" + "Objective", + "CreateTime", + "LastUpdateTime" ], "members":{ "Arn":{ "shape":"CommonControlArn", "documentation":"

The Amazon Resource Name (ARN) that identifies the common control.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The time when the common control was created.

" + "Name":{ + "shape":"String", + "documentation":"

The name of the common control.

" }, "Description":{ "shape":"String", @@ -149,30 +185,82 @@ "shape":"AssociatedDomainSummary", "documentation":"

The domain that the common control belongs to.

" }, + "Objective":{ + "shape":"AssociatedObjectiveSummary", + "documentation":"

The objective that the common control belongs to.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time when the common control was created.

" + }, "LastUpdateTime":{ "shape":"Timestamp", "documentation":"

The time when the common control was most recently updated.

" + } + }, + "documentation":"

A summary of metadata for a common control.

" + }, + "CommonControlSummaryList":{ + "type":"list", + "member":{"shape":"CommonControlSummary"} + }, + "ControlArn":{ + "type":"string", + "max":2048, + "min":34, + "pattern":"arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+" + }, + "ControlBehavior":{ + "type":"string", + "enum":[ + "PREVENTIVE", + "PROACTIVE", + "DETECTIVE" + ] + }, + "ControlScope":{ + "type":"string", + "enum":[ + "GLOBAL", + "REGIONAL" + ] + }, + "ControlSummary":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "Description" + ], + "members":{ + "Arn":{ + "shape":"ControlArn", + "documentation":"

The Amazon Resource Name (ARN) of the control.

" }, "Name":{ "shape":"String", - "documentation":"

The name of the common control.

" + "documentation":"

The display name of the control.

" }, - "Objective":{ - "shape":"AssociatedObjectiveSummary", - "documentation":"

The objective that the common control belongs to.

" + "Description":{ + "shape":"String", + "documentation":"

A description of the control, as it may appear in the console. Describes the functionality of the control.

" } }, - "documentation":"

A summary of metadata for a common control.

" + "documentation":"

Overview of information about a control.

" }, - "CommonControlSummaryList":{ + "Controls":{ "type":"list", - "member":{"shape":"CommonControlSummary"} + "member":{"shape":"ControlSummary"} + }, + "DeployableRegions":{ + "type":"list", + "member":{"shape":"RegionCode"} }, "DomainArn":{ "type":"string", "max":2048, "min":33, - "pattern":"^arn:(aws(?:[-a-z]*)?):controlcatalog:::domain/[0-9a-z]+$" + "pattern":"arn:(aws(?:[-a-z]*)?):controlcatalog:::domain/[0-9a-z]+" }, "DomainResourceFilter":{ "type":"structure", @@ -192,31 +280,31 @@ "type":"structure", "required":[ "Arn", - "CreateTime", + "Name", "Description", - "LastUpdateTime", - "Name" + "CreateTime", + "LastUpdateTime" ], "members":{ "Arn":{ "shape":"DomainArn", "documentation":"

The Amazon Resource Name (ARN) that identifies the domain.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The time when the domain was created.

" + "Name":{ + "shape":"String", + "documentation":"

The name of the domain.

" }, "Description":{ "shape":"String", "documentation":"

The description of the domain.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time when the domain was created.

" + }, "LastUpdateTime":{ "shape":"Timestamp", "documentation":"

The time when the domain was most recently updated.

" - }, - "Name":{ - "shape":"String", - "documentation":"

The name of the domain.

" } }, "documentation":"

A summary of metadata for a domain.

" @@ -225,6 +313,45 @@ "type":"list", "member":{"shape":"DomainSummary"} }, + "GetControlRequest":{ + "type":"structure", + "required":["ControlArn"], + "members":{ + "ControlArn":{ + "shape":"ControlArn", + "documentation":"

The Amazon Resource Name (ARN) of the control. It has one of the following formats:

Global format

arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}

Or Regional format

arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}

Here is a more general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs:

^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\\\-]+$

" + } + } + }, + "GetControlResponse":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "Description", + "Behavior", + "RegionConfiguration" + ], + "members":{ + "Arn":{ + "shape":"ControlArn", + "documentation":"

The Amazon Resource Name (ARN) of the control.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The display name of the control.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description of what the control does.

" + }, + "Behavior":{ + "shape":"ControlBehavior", + "documentation":"

A term that identifies the control's functional behavior. One of Preventive, Detective, Proactive

" + }, + "RegionConfiguration":{"shape":"RegionConfiguration"} + } + }, "InternalServerException":{ "type":"structure", "members":{ @@ -239,10 +366,6 @@ "ListCommonControlsRequest":{ "type":"structure", "members":{ - "CommonControlFilter":{ - "shape":"CommonControlFilter", - "documentation":"

An optional filter that narrows the results to a specific objective.

This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

" - }, "MaxResults":{ "shape":"MaxListCommonControlsResults", "documentation":"

The maximum number of results on a page or for an API request call.

", @@ -254,6 +377,10 @@ "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" + }, + "CommonControlFilter":{ + "shape":"CommonControlFilter", + "documentation":"

An optional filter that narrows the results to a specific objective.

This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

" } } }, @@ -271,6 +398,37 @@ } } }, + "ListControlsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxListControlsResults", + "documentation":"

The maximum number of results on a page or for an API request call.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListControlsResponse":{ + "type":"structure", + "required":["Controls"], + "members":{ + "Controls":{ + "shape":"Controls", + "documentation":"

Returns a list of controls, given as structures of type controlSummary.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

" + } + } + }, "ListDomainsRequest":{ "type":"structure", "members":{ @@ -327,13 +485,13 @@ "type":"structure", "required":["Objectives"], "members":{ - "NextToken":{ - "shape":"PaginationToken", - "documentation":"

The pagination token that's used to fetch the next set of results.

" - }, "Objectives":{ "shape":"ObjectiveSummaryList", "documentation":"

The list of objectives that the ListObjectives API returns.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -343,6 +501,12 @@ "max":100, "min":1 }, + "MaxListControlsResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "MaxListDomainsResults":{ "type":"integer", "box":true, @@ -359,7 +523,7 @@ "type":"string", "max":2048, "min":36, - "pattern":"^arn:(aws(?:[-a-z]*)?):controlcatalog:::objective/[0-9a-z]+$" + "pattern":"arn:(aws(?:[-a-z]*)?):controlcatalog:::objective/[0-9a-z]+" }, "ObjectiveFilter":{ "type":"structure", @@ -389,20 +553,20 @@ "type":"structure", "required":[ "Arn", - "CreateTime", + "Name", "Description", "Domain", - "LastUpdateTime", - "Name" + "CreateTime", + "LastUpdateTime" ], "members":{ "Arn":{ "shape":"ObjectiveArn", "documentation":"

The Amazon Resource Name (ARN) that identifies the objective.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The time when the objective was created.

" + "Name":{ + "shape":"String", + "documentation":"

The name of the objective.

" }, "Description":{ "shape":"String", @@ -412,13 +576,13 @@ "shape":"AssociatedDomainSummary", "documentation":"

The domain that the objective belongs to.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time when the objective was created.

" + }, "LastUpdateTime":{ "shape":"Timestamp", "documentation":"

The time when the objective was most recently updated.

" - }, - "Name":{ - "shape":"String", - "documentation":"

The name of the objective.

" } }, "documentation":"

A summary of metadata for an objective.

" @@ -432,6 +596,37 @@ "max":1024, "min":0 }, + "RegionCode":{ + "type":"string", + "pattern":"[a-zA-Z0-9-]{1,128}" + }, + "RegionConfiguration":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"ControlScope", + "documentation":"

The coverage of the control, if deployed. Scope is an enumerated type, with value Regional, or Global. A control with Global scope is effective in all Amazon Web Services Regions, regardless of the Region from which it is enabled, or to which it is deployed. A control implemented by an SCP is usually Global in scope. A control with Regional scope has operations that are restricted specifically to the Region from which it is enabled and to which it is deployed. Controls implemented by Config rules and CloudFormation hooks usually are Regional in scope. Security Hub controls usually are Regional in scope.

" + }, + "DeployableRegions":{ + "shape":"DeployableRegions", + "documentation":"

Regions in which the control is available to be deployed.

" + } + }, + "documentation":"

Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment.

If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A,B,and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The requested resource does not exist.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "String":{"type":"string"}, "ThrottlingException":{ "type":"structure", diff --git a/botocore/data/controlcatalog/2018-05-10/waiters-2.json b/botocore/data/controlcatalog/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/controlcatalog/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/controltower/2018-05-10/paginators-1.json b/botocore/data/controltower/2018-05-10/paginators-1.json index 58e933d3ce..e34843be35 100644 --- a/botocore/data/controltower/2018-05-10/paginators-1.json +++ b/botocore/data/controltower/2018-05-10/paginators-1.json @@ -23,6 +23,18 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "enabledBaselines" + }, + "ListControlOperations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "controlOperations" + }, + "ListLandingZoneOperations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "landingZoneOperations" } } } diff --git a/botocore/data/controltower/2018-05-10/service-2.json b/botocore/data/controltower/2018-05-10/service-2.json index bd8367787d..c3785ddbd1 100644 --- a/botocore/data/controltower/2018-05-10/service-2.json +++ b/botocore/data/controltower/2018-05-10/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"controltower", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Control Tower", "serviceId":"ControlTower", "signatureVersion":"v4", "signingName":"controltower", - "uid":"controltower-2018-05-10" + "uid":"controltower-2018-05-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateLandingZone":{ @@ -68,7 +70,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline.

", + "documentation":"

Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline. For usage examples, see the Amazon Web Services Control Tower User Guide .

", "idempotent":true }, "DisableControl":{ @@ -89,7 +91,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Controls Reference Guide .

" }, "EnableBaseline":{ "name":"EnableBaseline", @@ -109,7 +111,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target.

" + "documentation":"

Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "EnableControl":{ "name":"EnableControl", @@ -129,7 +131,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Controls Reference Guide .

" }, "GetBaseline":{ "name":"GetBaseline", @@ -147,7 +149,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieve details about an existing Baseline resource by specifying its identifier.

" + "documentation":"

Retrieve details about an existing Baseline resource by specifying its identifier. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "GetBaselineOperation":{ "name":"GetBaselineOperation", @@ -165,7 +167,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure.

" + "documentation":"

Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "GetControlOperation":{ "name":"GetControlOperation", @@ -183,7 +185,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Controls Reference Guide .

" }, "GetEnabledBaseline":{ "name":"GetEnabledBaseline", @@ -219,7 +221,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves details about an enabled control. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Retrieves details about an enabled control. For usage examples, see the Controls Reference Guide .

" }, "GetLandingZone":{ "name":"GetLandingZone", @@ -255,7 +257,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the status of the specified landing zone operation. Details for an operation are available for 60 days.

" + "documentation":"

Returns the status of the specified landing zone operation. Details for an operation are available for 90 days.

" }, "ListBaselines":{ "name":"ListBaselines", @@ -272,7 +274,24 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a summary list of all available baselines.

" + "documentation":"

Returns a summary list of all available baselines. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + }, + "ListControlOperations":{ + "name":"ListControlOperations", + "http":{ + "method":"POST", + "requestUri":"/list-control-operations", + "responseCode":200 + }, + "input":{"shape":"ListControlOperationsInput"}, + "output":{"shape":"ListControlOperationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Provides a list of operations in progress or queued. For usage examples, see ListControlOperation examples.

" }, "ListEnabledBaselines":{ "name":"ListEnabledBaselines", @@ -289,7 +308,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources.

" + "documentation":"

Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "ListEnabledControls":{ "name":"ListEnabledControls", @@ -307,7 +326,24 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Controls Reference Guide .

" + }, + "ListLandingZoneOperations":{ + "name":"ListLandingZoneOperations", + "http":{ + "method":"POST", + "requestUri":"/list-landingzone-operations", + "responseCode":200 + }, + "input":{"shape":"ListLandingZoneOperationsInput"}, + "output":{"shape":"ListLandingZoneOperationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists all landing zone operations from the past 90 days. Results are sorted by time, with the most recent operation first.

" }, "ListLandingZones":{ "name":"ListLandingZones", @@ -340,7 +376,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns a list of tags associated with the resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Returns a list of tags associated with the resource. For usage examples, see the Controls Reference Guide .

" }, "ResetEnabledBaseline":{ "name":"ResetEnabledBaseline", @@ -360,7 +396,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU.

" + "documentation":"

Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "ResetLandingZone":{ "name":"ResetLandingZone", @@ -379,7 +415,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

This API call resets a landing zone. It starts an asynchronous operation that resets the landing zone to the parameters specified in its original configuration.

" + "documentation":"

This API call resets a landing zone. It starts an asynchronous operation that resets the landing zone to the parameters specified in the original configuration, which you specified in the manifest file. Nothing in the manifest file's original landing zone configuration is changed during the reset process, by default. This API is not the same as a rollback of a landing zone version, which is not a supported operation.

" }, "TagResource":{ "name":"TagResource", @@ -395,7 +431,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Applies tags to a resource. For usage examples, see the Controls Reference Guide .

" }, "UntagResource":{ "name":"UntagResource", @@ -411,7 +447,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes tags from a resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

Removes tags from a resource. For usage examples, see the Controls Reference Guide .

" }, "UpdateEnabledBaseline":{ "name":"UpdateEnabledBaseline", @@ -431,7 +467,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates an EnabledBaseline resource's applied parameters or version.

" + "documentation":"

Updates an EnabledBaseline resource's applied parameters or version. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, "UpdateEnabledControl":{ "name":"UpdateEnabledControl", @@ -451,7 +487,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the configuration of an already enabled control.

If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request.

If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower will update the control to match any valid parameters that you supply.

If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the Amazon Web Services Control Tower User Guide

" + "documentation":"

Updates the configuration of an already enabled control.

If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request.

If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply.

If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the Controls Reference Guide .

" }, "UpdateLandingZone":{ "name":"UpdateLandingZone", @@ -595,13 +631,31 @@ "min":20, "pattern":"^arn:aws[0-9a-zA-Z_\\-:\\/]+$" }, + "ControlIdentifiers":{ + "type":"list", + "member":{"shape":"ControlIdentifier"}, + "max":1, + "min":1 + }, "ControlOperation":{ "type":"structure", "members":{ + "controlIdentifier":{ + "shape":"ControlIdentifier", + "documentation":"

The controlIdentifier of the control for the operation.

" + }, + "enabledControlIdentifier":{ + "shape":"Arn", + "documentation":"

The controlIdentifier of the enabled control.

" + }, "endTime":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The time that the operation finished.

" }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The identifier of the specified operation.

" + }, "operationType":{ "shape":"ControlOperationType", "documentation":"

One of ENABLE_CONTROL or DISABLE_CONTROL.

" @@ -617,10 +671,40 @@ "statusMessage":{ "shape":"String", "documentation":"

If the operation result is FAILED, this string contains a message explaining why the operation failed.

" + }, + "targetIdentifier":{ + "shape":"TargetIdentifier", + "documentation":"

The target upon which the control operation is working.

" } }, "documentation":"

An operation performed by the control.

" }, + "ControlOperationFilter":{ + "type":"structure", + "members":{ + "controlIdentifiers":{ + "shape":"ControlIdentifiers", + "documentation":"

The set of controlIdentifier returned by the filter.

" + }, + "controlOperationTypes":{ + "shape":"ControlOperationTypes", + "documentation":"

The set of ControlOperation objects returned by the filter.

" + }, + "enabledControlIdentifiers":{ + "shape":"EnabledControlIdentifiers", + "documentation":"

The set of controlIdentifier values of enabled controls selected by the filter.

" + }, + "statuses":{ + "shape":"ControlOperationStatuses", + "documentation":"

Lists the status of control operations.

" + }, + "targetIdentifiers":{ + "shape":"TargetIdentifiers", + "documentation":"

The set of targetIdentifier objects returned by the filter.

" + } + }, + "documentation":"

A filter object that lets you call ListControlOperations with a specific filter.

" + }, "ControlOperationStatus":{ "type":"string", "enum":[ @@ -629,6 +713,54 @@ "IN_PROGRESS" ] }, + "ControlOperationStatuses":{ + "type":"list", + "member":{"shape":"ControlOperationStatus"}, + "max":1, + "min":1 + }, + "ControlOperationSummary":{ + "type":"structure", + "members":{ + "controlIdentifier":{ + "shape":"ControlIdentifier", + "documentation":"

The controlIdentifier of a control.

" + }, + "enabledControlIdentifier":{ + "shape":"Arn", + "documentation":"

The controlIdentifier of an enabled control.

" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The time at which the control operation was completed.

" + }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The unique identifier of a control operation.

" + }, + "operationType":{ + "shape":"ControlOperationType", + "documentation":"

The type of operation.

" + }, + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The time at which a control operation began.

" + }, + "status":{ + "shape":"ControlOperationStatus", + "documentation":"

The status of the specified control operation.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

A specific message displayed as part of the control status.

" + }, + "targetIdentifier":{ + "shape":"TargetIdentifier", + "documentation":"

The unique identifier of the target of a control operation.

" + } + }, + "documentation":"

A summary of information about the specified control operation.

" + }, "ControlOperationType":{ "type":"string", "enum":[ @@ -637,6 +769,16 @@ "UPDATE_ENABLED_CONTROL" ] }, + "ControlOperationTypes":{ + "type":"list", + "member":{"shape":"ControlOperationType"}, + "max":1, + "min":1 + }, + "ControlOperations":{ + "type":"list", + "member":{"shape":"ControlOperationSummary"} + }, "CreateLandingZoneInput":{ "type":"structure", "required":[ @@ -646,7 +788,7 @@ "members":{ "manifest":{ "shape":"Manifest", - "documentation":"

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

" + "documentation":"

The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone.

" }, "tags":{ "shape":"TagMap", @@ -767,6 +909,12 @@ }, "documentation":"

The drift summary of the enabled control.

Amazon Web Services Control Tower expects the enabled control configuration to include all supported and governed Regions. If the enabled control differs from the expected configuration, it is defined to be in a state of drift. You can repair this drift by resetting the enabled control.

" }, + "DriftStatuses":{ + "type":"list", + "member":{"shape":"DriftStatus"}, + "max":1, + "min":1 + }, "EnableBaselineInput":{ "type":"structure", "required":[ @@ -1029,6 +1177,30 @@ }, "documentation":"

Information about the enabled control.

" }, + "EnabledControlFilter":{ + "type":"structure", + "members":{ + "controlIdentifiers":{ + "shape":"ControlIdentifiers", + "documentation":"

The set of controlIdentifier returned by the filter.

" + }, + "driftStatuses":{ + "shape":"DriftStatuses", + "documentation":"

A list of DriftStatus items.

" + }, + "statuses":{ + "shape":"EnablementStatuses", + "documentation":"

A list of EnablementStatus items.

" + } + }, + "documentation":"

A structure that returns a set of control identifiers, the control status for each control in the set, and the drift status for each control in the set.

" + }, + "EnabledControlIdentifiers":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":1, + "min":1 + }, "EnabledControlParameter":{ "type":"structure", "required":[ @@ -1116,14 +1288,20 @@ "members":{ "lastOperationIdentifier":{ "shape":"OperationIdentifier", - "documentation":"

The last operation identifier for the enabled control.

" + "documentation":"

The last operation identifier for the enabled resource.

" }, "status":{ "shape":"EnablementStatus", - "documentation":"

The deployment status of the enabled control.

Valid values:

  • SUCCEEDED: The enabledControl configuration was deployed successfully.

  • UNDER_CHANGE: The enabledControl configuration is changing.

  • FAILED: The enabledControl configuration failed to deploy.

" + "documentation":"

The deployment status of the enabled resource.

Valid values:

  • SUCCEEDED: The EnabledControl or EnabledBaseline configuration was deployed successfully.

  • UNDER_CHANGE: The EnabledControl or EnabledBaseline configuration is changing.

  • FAILED: The EnabledControl or EnabledBaseline configuration failed to deploy.

" } }, - "documentation":"

The deployment summary of the enabled control.

" + "documentation":"

The deployment summary of an EnabledControl or EnabledBaseline resource.

" + }, + "EnablementStatuses":{ + "type":"list", + "member":{"shape":"EnablementStatus"}, + "max":1, + "min":1 }, "GetBaselineInput":{ "type":"structure", @@ -1312,7 +1490,7 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

The landing zone manifest.yaml text file that specifies the landing zone configurations.

" + "documentation":"

The landing zone manifest JSON text file that specifies the landing zone configurations.

" }, "status":{ "shape":"LandingZoneStatus", @@ -1349,6 +1527,10 @@ "shape":"Timestamp", "documentation":"

The landing zone operation end time.

" }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The operationIdentifier of the landing zone operation.

" + }, "operationType":{ "shape":"LandingZoneOperationType", "documentation":"

The landing zone operation type.

Valid values:

  • DELETE: The DeleteLandingZone operation.

  • CREATE: The CreateLandingZone operation.

  • UPDATE: The UpdateLandingZone operation.

  • RESET: The ResetLandingZone operation.

" @@ -1368,6 +1550,20 @@ }, "documentation":"

Information about a landing zone operation.

" }, + "LandingZoneOperationFilter":{ + "type":"structure", + "members":{ + "statuses":{ + "shape":"LandingZoneOperationStatuses", + "documentation":"

The statuses of the set of landing zone operations selected by the filter.

" + }, + "types":{ + "shape":"LandingZoneOperationTypes", + "documentation":"

The set of landing zone operation types selected by the filter.

" + } + }, + "documentation":"

A filter object that lets you call ListLandingZoneOperations with a specific filter.

" + }, "LandingZoneOperationStatus":{ "type":"string", "enum":[ @@ -1376,6 +1572,30 @@ "IN_PROGRESS" ] }, + "LandingZoneOperationStatuses":{ + "type":"list", + "member":{"shape":"LandingZoneOperationStatus"}, + "max":1, + "min":1 + }, + "LandingZoneOperationSummary":{ + "type":"structure", + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The operationIdentifier of the landing zone operation.

" + }, + "operationType":{ + "shape":"LandingZoneOperationType", + "documentation":"

The type of the landing zone operation.

" + }, + "status":{ + "shape":"LandingZoneOperationStatus", + "documentation":"

The status of the landing zone operation.

" + } + }, + "documentation":"

Returns a summary of information about a landing zone operation.

" + }, "LandingZoneOperationType":{ "type":"string", "enum":[ @@ -1385,6 +1605,16 @@ "RESET" ] }, + "LandingZoneOperationTypes":{ + "type":"list", + "member":{"shape":"LandingZoneOperationType"}, + "max":1, + "min":1 + }, + "LandingZoneOperations":{ + "type":"list", + "member":{"shape":"LandingZoneOperationSummary"} + }, "LandingZoneStatus":{ "type":"string", "enum":[ @@ -1442,6 +1672,47 @@ } } }, + "ListControlOperationsInput":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ControlOperationFilter", + "documentation":"

An input filter for the ListControlOperations API that lets you select the types of control operations to view.

" + }, + "maxResults":{ + "shape":"ListControlOperationsMaxResults", + "documentation":"

The maximum number of results to be shown.

" + }, + "nextToken":{ + "shape":"ListControlOperationsNextToken", + "documentation":"

A pagination token.

" + } + } + }, + "ListControlOperationsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListControlOperationsNextToken":{ + "type":"string", + "pattern":"\\S+" + }, + "ListControlOperationsOutput":{ + "type":"structure", + "required":["controlOperations"], + "members":{ + "controlOperations":{ + "shape":"ControlOperations", + "documentation":"

Returns a list of output from control operations.

" + }, + "nextToken":{ + "shape":"ListControlOperationsNextToken", + "documentation":"

A pagination token.

" + } + } + }, "ListEnabledBaselinesInput":{ "type":"structure", "members":{ @@ -1485,8 +1756,11 @@ }, "ListEnabledControlsInput":{ "type":"structure", - "required":["targetIdentifier"], "members":{ + "filter":{ + "shape":"EnabledControlFilter", + "documentation":"

An input filter for the ListEnabledControls API that lets you select the types of control operations to view.

" + }, "maxResults":{ "shape":"MaxResults", "documentation":"

How many results to return per API call.

" @@ -1515,6 +1789,43 @@ } } }, + "ListLandingZoneOperationsInput":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"LandingZoneOperationFilter", + "documentation":"

An input filter for the ListLandingZoneOperations API that lets you select the types of landing zone operations to view.

" + }, + "maxResults":{ + "shape":"ListLandingZoneOperationsMaxResults", + "documentation":"

How many results to return per API call.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token to continue the list from a previous API call with the same parameters.

" + } + } + }, + "ListLandingZoneOperationsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListLandingZoneOperationsOutput":{ + "type":"structure", + "required":["landingZoneOperations"], + "members":{ + "landingZoneOperations":{ + "shape":"LandingZoneOperations", + "documentation":"

Lists landing zone operations.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Retrieves the next page of results. If the string is empty, the response is the end of the results.

" + } + } + }, "ListLandingZonesInput":{ "type":"structure", "members":{ @@ -1733,6 +2044,12 @@ "min":20, "pattern":"^arn:aws[0-9a-zA-Z_\\-:\\/]+$" }, + "TargetIdentifiers":{ + "type":"list", + "member":{"shape":"TargetIdentifier"}, + "max":1, + "min":1 + }, "TargetRegions":{ "type":"list", "member":{"shape":"Region"} @@ -1867,7 +2184,7 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

" + "documentation":"

The manifest file (JSON) is a text file that describes your Amazon Web Services resources. For an example, review Launch your landing zone. The example manifest file contains each of the available parameters. The schema for the landing zone's JSON manifest file is not published, by design.

" }, "version":{ "shape":"LandingZoneVersion", @@ -1899,5 +2216,5 @@ "exception":true } }, - "documentation":"

These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

To call these APIs, you'll need to know:

  • the controlIdentifier for the control--or guardrail--you are targeting.

  • the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

  • the ARN associated with a resource that you wish to tag or untag.

To get the controlIdentifier for your Amazon Web Services Control Tower control:

The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

The controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata in the Amazon Web Services Control Tower User Guide.

A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Controls reference guide section of the Amazon Web Services Control Tower User Guide. Remember that Mandatory controls cannot be added or removed.

ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME}

Example:

arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED

To get the targetIdentifier:

The targetIdentifier is the ARN for an OU.

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

OU ARN format:

arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}

Details and examples

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower

Recording API Requests

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide.

" + "documentation":"

Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:

For more information about these types of resources, see the Amazon Web Services Control Tower User Guide .

About control APIs

These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

To call these APIs, you'll need to know:

  • the controlIdentifier for the control--or guardrail--you are targeting.

  • the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

  • the ARN associated with a resource that you wish to tag or untag.

To get the controlIdentifier for your Amazon Web Services Control Tower control:

The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

About identifiers for Amazon Web Services Control Tower

The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.

A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide . Remember that Mandatory controls cannot be added or removed.

Some controls have two identifiers

  • ARN format for Amazon Web Services Control Tower: arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}

    Example:

    arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED

  • ARN format for Amazon Web Services Control Catalog: arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}

You can find the {CONTROL_CATALOG_OPAQUE_ID} in the Amazon Web Services Control Tower Controls Reference Guide , or in the Amazon Web Services Control Tower console, on the Control details page.

The Amazon Web Services Control Tower APIs for enabled controls, such as GetEnabledControl and ListEnabledControls always return an ARN of the same type given when the control was enabled.

To get the targetIdentifier:

The targetIdentifier is the ARN for an OU.

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

OU ARN format:

arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}

About landing zone APIs

You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.

For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.

About baseline APIs

You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.

You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.

The individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.

About Amazon Web Services Control Catalog identifiers

  • The EnableControl and DisableControl API operations can be called by specifying either the Amazon Web Services Control Tower identifer or the Amazon Web Services Control Catalog identifier. The API response returns the same type of identifier that you specified when calling the API.

  • If you use an Amazon Web Services Control Tower identifier to call the EnableControl API, and then call EnableControl again with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control Tower returns an error message stating that the control is already enabled. Similar behavior applies to the DisableControl API operation.

  • Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only.

Details and examples

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower

Recording API Requests

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide.

" } diff --git a/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json b/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json index b28fd5a4c0..f9c62d536c 100644 --- a/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json +++ b/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json @@ -11,7 +11,8 @@ "non_aggregate_keys": [ "groupBy", "currencyCode", - "estimatedTotalDedupedSavings" + "estimatedTotalDedupedSavings", + "metrics" ] } } diff --git a/botocore/data/cost-optimization-hub/2022-07-26/service-2.json b/botocore/data/cost-optimization-hub/2022-07-26/service-2.json index 664c769f59..b66cf10f53 100644 --- a/botocore/data/cost-optimization-hub/2022-07-26/service-2.json +++ b/botocore/data/cost-optimization-hub/2022-07-26/service-2.json @@ -2,9 +2,11 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-07-26", + "auth":["aws.auth#sigv4"], "endpointPrefix":"cost-optimization-hub", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceFullName":"Cost Optimization Hub", "serviceId":"Cost Optimization Hub", "signatureVersion":"v4", @@ -108,7 +110,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization Hub service.

If the account is a management account of an organization, this action can also be used to enroll member accounts of the organization.

You must have the appropriate permissions to opt in to Cost Optimization Hub and to view its recommendations. When you opt in, Cost Optimization Hub automatically creates a service-linked role in your account to access its data.

" + "documentation":"

Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization Hub service.

If the account is a management account or delegated administrator of an organization, this action can also be used to enroll member accounts of the organization.

You must have the appropriate permissions to opt in to Cost Optimization Hub and to view its recommendations. When you opt in, Cost Optimization Hub automatically creates a service-linked role in your account to access its data.

" }, "UpdatePreferences":{ "name":"UpdatePreferences", @@ -144,17 +146,17 @@ "shape":"AccountId", "documentation":"

The Amazon Web Services account ID.

" }, - "createdTimestamp":{ - "shape":"Timestamp", - "documentation":"

The time when the account enrollment status was created.

" + "status":{ + "shape":"EnrollmentStatus", + "documentation":"

The account enrollment status.

" }, "lastUpdatedTimestamp":{ "shape":"Timestamp", "documentation":"

The time when the account enrollment status was last updated.

" }, - "status":{ - "shape":"EnrollmentStatus", - "documentation":"

The account enrollment status.

" + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time when the account enrollment status was created.

" } }, "documentation":"

Describes the enrollment status of an organization's member accounts in Cost Optimization Hub.

" @@ -165,7 +167,7 @@ }, "AccountId":{ "type":"string", - "pattern":"^[0-9]{12}$" + "pattern":"[0-9]{12}" }, "AccountIdList":{ "type":"list", @@ -211,21 +213,21 @@ "ComputeConfiguration":{ "type":"structure", "members":{ - "architecture":{ - "shape":"String", - "documentation":"

The architecture of the resource.

" + "vCpu":{ + "shape":"Double", + "documentation":"

The number of vCPU cores in the resource.

" }, "memorySizeInMB":{ "shape":"Integer", "documentation":"

The memory size of the resource.

" }, + "architecture":{ + "shape":"String", + "documentation":"

The architecture of the resource.

" + }, "platform":{ "shape":"String", "documentation":"

The platform of the resource. The platform is the specific combination of operating system, license model, and software on an instance.

" - }, - "vCpu":{ - "shape":"Double", - "documentation":"

The number of vCPU cores in the resource.

" } }, "documentation":"

Describes the performance configuration for compute services such as Amazon EC2, Lambda, and ECS.

" @@ -251,22 +253,32 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" }, - "hourlyCommitment":{ + "term":{ "shape":"String", - "documentation":"

The hourly commitment for the Savings Plans type.

" + "documentation":"

The Savings Plans recommendation term in years.

" }, "paymentOption":{ "shape":"String", "documentation":"

The payment option for the commitment.

" }, - "term":{ + "hourlyCommitment":{ "shape":"String", - "documentation":"

The Savings Plans recommendation term in years.

" + "documentation":"

The hourly commitment for the Savings Plans type.

" } }, "documentation":"

The Compute Savings Plans configuration used for recommendations.

" }, "Datetime":{"type":"timestamp"}, + "DbInstanceConfiguration":{ + "type":"structure", + "members":{ + "dbInstanceClass":{ + "shape":"String", + "documentation":"

The DB instance class of the DB instance.

" + } + }, + "documentation":"

The DB instance configuration used for recommendations.

" + }, "Double":{ "type":"double", "box":true @@ -288,17 +300,17 @@ "EbsVolumeConfiguration":{ "type":"structure", "members":{ - "attachmentState":{ - "shape":"String", - "documentation":"

The Amazon Elastic Block Store attachment state.

" + "storage":{ + "shape":"StorageConfiguration", + "documentation":"

The disk storage of the Amazon Elastic Block Store volume.

" }, "performance":{ "shape":"BlockStoragePerformanceConfiguration", "documentation":"

The Amazon Elastic Block Store performance configuration.

" }, - "storage":{ - "shape":"StorageConfiguration", - "documentation":"

The disk storage of the Amazon Elastic Block Store volume.

" + "attachmentState":{ + "shape":"String", + "documentation":"

The Amazon Elastic Block Store attachment state.

" } }, "documentation":"

The Amazon Elastic Block Store volume configuration used for recommendations.

" @@ -372,6 +384,14 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, + "term":{ + "shape":"String", + "documentation":"

The Savings Plans recommendation term in years.

" + }, + "paymentOption":{ + "shape":"String", + "documentation":"

The payment option for the commitment.

" + }, "hourlyCommitment":{ "shape":"String", "documentation":"

The hourly commitment for the Savings Plans type.

" @@ -380,17 +400,9 @@ "shape":"String", "documentation":"

The instance family of the recommended Savings Plan.

" }, - "paymentOption":{ - "shape":"String", - "documentation":"

The payment option for the commitment.

" - }, "savingsPlansRegion":{ "shape":"String", "documentation":"

The Amazon Web Services Region of the commitment.

" - }, - "term":{ - "shape":"String", - "documentation":"

The Savings Plans recommendation term in years.

" } }, "documentation":"

The EC2 instance Savings Plans configuration used for recommendations.

" @@ -416,25 +428,21 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "currentGeneration":{ - "shape":"String", - "documentation":"

Determines whether the recommendation is for a current generation instance.

" - }, - "instanceFamily":{ + "service":{ "shape":"String", - "documentation":"

The instance family of the recommended reservation.

" + "documentation":"

The service that you want your recommendations for.

" }, - "instanceType":{ + "normalizedUnitsToPurchase":{ "shape":"String", - "documentation":"

The type of instance that Amazon Web Services recommends.

" + "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" }, - "monthlyRecurringCost":{ + "term":{ "shape":"String", - "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" + "documentation":"

The reserved instances recommendation term in years.

" }, - "normalizedUnitsToPurchase":{ + "paymentOption":{ "shape":"String", - "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" + "documentation":"

The payment option for the commitment.

" }, "numberOfInstancesToPurchase":{ "shape":"String", @@ -444,37 +452,41 @@ "shape":"String", "documentation":"

Indicates whether the recommendation is for standard or convertible reservations.

" }, - "paymentOption":{ + "instanceFamily":{ "shape":"String", - "documentation":"

The payment option for the commitment.

" + "documentation":"

The instance family of the recommended reservation.

" }, - "platform":{ + "instanceType":{ "shape":"String", - "documentation":"

The platform of the recommended reservation. The platform is the specific combination of operating system, license model, and software on an instance.

" + "documentation":"

The type of instance that Amazon Web Services recommends.

" }, "reservedInstancesRegion":{ "shape":"String", "documentation":"

The Amazon Web Services Region of the commitment.

" }, - "service":{ + "currentGeneration":{ "shape":"String", - "documentation":"

The service that you want your recommendations for.

" + "documentation":"

Determines whether the recommendation is for a current generation instance.

" }, - "sizeFlexEligible":{ - "shape":"Boolean", - "documentation":"

Determines whether the recommendation is size flexible.

" + "platform":{ + "shape":"String", + "documentation":"

The platform of the recommended reservation. The platform is the specific combination of operating system, license model, and software on an instance.

" }, "tenancy":{ "shape":"String", "documentation":"

Determines whether the recommended reservation is dedicated or shared.

" }, - "term":{ - "shape":"String", - "documentation":"

The reserved instances recommendation term in years.

" + "sizeFlexEligible":{ + "shape":"Boolean", + "documentation":"

Determines whether the recommendation is size flexible.

" }, "upfrontCost":{ "shape":"String", "documentation":"

How much purchasing this instance costs you upfront.

" + }, + "monthlyRecurringCost":{ + "shape":"String", + "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" } }, "documentation":"

The EC2 reserved instances configuration used for recommendations.

" @@ -524,53 +536,53 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "currentGeneration":{ - "shape":"String", - "documentation":"

Determines whether the recommendation is for a current generation instance.

" - }, - "instanceFamily":{ + "service":{ "shape":"String", - "documentation":"

The instance family of the recommended reservation.

" + "documentation":"

The service that you want your recommendations for.

" }, - "instanceType":{ + "normalizedUnitsToPurchase":{ "shape":"String", - "documentation":"

The type of instance that Amazon Web Services recommends.

" + "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" }, - "monthlyRecurringCost":{ + "term":{ "shape":"String", - "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" + "documentation":"

The reserved instances recommendation term in years.

" }, - "normalizedUnitsToPurchase":{ + "paymentOption":{ "shape":"String", - "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" + "documentation":"

The payment option for the commitment.

" }, "numberOfInstancesToPurchase":{ "shape":"String", "documentation":"

The number of instances that Amazon Web Services recommends that you purchase.

" }, - "paymentOption":{ + "instanceFamily":{ "shape":"String", - "documentation":"

The payment option for the commitment.

" + "documentation":"

The instance family of the recommended reservation.

" + }, + "instanceType":{ + "shape":"String", + "documentation":"

The type of instance that Amazon Web Services recommends.

" }, "reservedInstancesRegion":{ "shape":"String", "documentation":"

The Amazon Web Services Region of the commitment.

" }, - "service":{ + "currentGeneration":{ "shape":"String", - "documentation":"

The service that you want your recommendations for.

" + "documentation":"

Determines whether the recommendation is for a current generation instance.

" }, "sizeFlexEligible":{ "shape":"Boolean", "documentation":"

Determines whether the recommendation is size flexible.

" }, - "term":{ - "shape":"String", - "documentation":"

The reserved instances recommendation term in years.

" - }, "upfrontCost":{ "shape":"String", "documentation":"

How much purchasing this instance costs you upfront.

" + }, + "monthlyRecurringCost":{ + "shape":"String", + "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" } }, "documentation":"

The ElastiCache reserved instances configuration used for recommendations.

" @@ -585,17 +597,17 @@ "EstimatedDiscounts":{ "type":"structure", "members":{ - "otherDiscount":{ + "savingsPlansDiscount":{ "shape":"Double", - "documentation":"

Estimated other discounts include all discounts that are not itemized. Itemized discounts include reservedInstanceDiscount and savingsPlansDiscount.

" + "documentation":"

Estimated Savings Plans discounts.

" }, "reservedInstancesDiscount":{ "shape":"Double", "documentation":"

Estimated reserved instance discounts.

" }, - "savingsPlansDiscount":{ + "otherDiscount":{ "shape":"Double", - "documentation":"

Estimated Savings Plans discounts.

" + "documentation":"

Estimated other discounts include all discounts that are not itemized. Itemized discounts include reservedInstanceDiscount and savingsPlansDiscount.

" } }, "documentation":"

Estimated discount details of the current and recommended resource configuration for a recommendation.

" @@ -603,49 +615,49 @@ "Filter":{ "type":"structure", "members":{ - "accountIds":{ - "shape":"AccountIdList", - "documentation":"

The account that the recommendation is for.

" + "restartNeeded":{ + "shape":"Boolean", + "documentation":"

Whether or not implementing the recommendation requires a restart.

" }, - "actionTypes":{ - "shape":"ActionTypeList", - "documentation":"

The type of action you can take by adopting the recommendation.

" + "rollbackPossible":{ + "shape":"Boolean", + "documentation":"

Whether or not implementing the recommendation can be rolled back.

" }, "implementationEfforts":{ "shape":"ImplementationEffortList", "documentation":"

The effort required to implement the recommendation.

" }, - "recommendationIds":{ - "shape":"RecommendationIdList", - "documentation":"

The IDs for the recommendations.

" + "accountIds":{ + "shape":"AccountIdList", + "documentation":"

The account that the recommendation is for.

" }, "regions":{ "shape":"RegionList", "documentation":"

The Amazon Web Services Region of the resource.

" }, - "resourceArns":{ - "shape":"ResourceArnList", - "documentation":"

The Amazon Resource Name (ARN) of the recommendation.

" - }, - "resourceIds":{ - "shape":"ResourceIdList", - "documentation":"

The resource ID of the recommendation.

" - }, "resourceTypes":{ "shape":"ResourceTypeList", "documentation":"

The resource type of the recommendation.

" }, - "restartNeeded":{ - "shape":"Boolean", - "documentation":"

Whether or not implementing the recommendation requires a restart.

" - }, - "rollbackPossible":{ - "shape":"Boolean", - "documentation":"

Whether or not implementing the recommendation can be rolled back.

" + "actionTypes":{ + "shape":"ActionTypeList", + "documentation":"

The type of action you can take by adopting the recommendation.

" }, "tags":{ "shape":"TagList", "documentation":"

A list of tags assigned to the recommendation.

" + }, + "resourceIds":{ + "shape":"ResourceIdList", + "documentation":"

The resource ID of the recommendation.

" + }, + "resourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Name (ARN) of the recommendation.

" + }, + "recommendationIds":{ + "shape":"RecommendationIdList", + "documentation":"

The IDs for the recommendations.

" } }, "documentation":"

Describes a filter that returns a more specific list of recommendations. Filters recommendations by different dimensions.

" @@ -658,13 +670,13 @@ "GetPreferencesResponse":{ "type":"structure", "members":{ - "memberAccountDiscountVisibility":{ - "shape":"MemberAccountDiscountVisibility", - "documentation":"

Retrieves the status of the \"member account discount visibility\" preference.

" - }, "savingsEstimationMode":{ "shape":"SavingsEstimationMode", "documentation":"

Retrieves the status of the \"savings estimation mode\" preference.

" + }, + "memberAccountDiscountVisibility":{ + "shape":"MemberAccountDiscountVisibility", + "documentation":"

Retrieves the status of the \"member account discount visibility\" preference.

" } } }, @@ -681,93 +693,93 @@ "GetRecommendationResponse":{ "type":"structure", "members":{ + "recommendationId":{ + "shape":"String", + "documentation":"

The ID for the recommendation.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

The unique identifier for the resource. This is the same as the Amazon Resource Name (ARN), if available.

" + }, + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, "accountId":{ "shape":"String", "documentation":"

The account that the recommendation is for.

" }, - "actionType":{ - "shape":"ActionType", - "documentation":"

The type of action you can take by adopting the recommendation.

" + "currencyCode":{ + "shape":"String", + "documentation":"

The currency code used for the recommendation.

" + }, + "recommendationLookbackPeriodInDays":{ + "shape":"Integer", + "documentation":"

The lookback period that's used to generate the recommendation.

" }, "costCalculationLookbackPeriodInDays":{ "shape":"Integer", "documentation":"

The lookback period used to calculate cost impact for a recommendation.

" }, - "currencyCode":{ - "shape":"String", - "documentation":"

The currency code used for the recommendation.

" + "estimatedSavingsPercentage":{ + "shape":"Double", + "documentation":"

The estimated savings percentage relative to the total cost over the cost calculation lookback period.

" }, - "currentResourceDetails":{ - "shape":"ResourceDetails", - "documentation":"

The details for the resource.

" + "estimatedSavingsOverCostCalculationLookbackPeriod":{ + "shape":"Double", + "documentation":"

The estimated savings amount over the lookback period used to calculate cost impact for a recommendation.

" }, "currentResourceType":{ "shape":"ResourceType", "documentation":"

The type of resource.

" }, - "estimatedMonthlyCost":{ - "shape":"Double", - "documentation":"

The estimated monthly cost of the recommendation.

" + "recommendedResourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type of the recommendation.

" + }, + "region":{ + "shape":"String", + "documentation":"

The Amazon Web Services Region of the resource.

" + }, + "source":{ + "shape":"Source", + "documentation":"

The source of the recommendation.

" + }, + "lastRefreshTimestamp":{ + "shape":"Datetime", + "documentation":"

The time when the recommendation was last generated.

" }, "estimatedMonthlySavings":{ "shape":"Double", "documentation":"

The estimated monthly savings amount for the recommendation.

" }, - "estimatedSavingsOverCostCalculationLookbackPeriod":{ - "shape":"Double", - "documentation":"

The estimated savings amount over the lookback period used to calculate cost impact for a recommendation.

" - }, - "estimatedSavingsPercentage":{ + "estimatedMonthlyCost":{ "shape":"Double", - "documentation":"

The estimated savings percentage relative to the total cost over the cost calculation lookback period.

" + "documentation":"

The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage.

" }, "implementationEffort":{ "shape":"ImplementationEffort", "documentation":"

The effort required to implement the recommendation.

" }, - "lastRefreshTimestamp":{ - "shape":"Datetime", - "documentation":"

The time when the recommendation was last generated.

" - }, - "recommendationId":{ - "shape":"String", - "documentation":"

The ID for the recommendation.

" - }, - "recommendationLookbackPeriodInDays":{ - "shape":"Integer", - "documentation":"

The lookback period that's used to generate the recommendation.

" - }, - "recommendedResourceDetails":{ - "shape":"ResourceDetails", - "documentation":"

The details about the recommended resource.

" - }, - "recommendedResourceType":{ - "shape":"ResourceType", - "documentation":"

The resource type of the recommendation.

" - }, - "region":{ - "shape":"String", - "documentation":"

The Amazon Web Services Region of the resource.

" - }, - "resourceArn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the resource.

" - }, - "resourceId":{ - "shape":"String", - "documentation":"

The unique identifier for the resource. This is the same as the Amazon Resource Name (ARN), if available.

" - }, "restartNeeded":{ "shape":"Boolean", "documentation":"

Whether or not implementing the recommendation requires a restart.

" }, + "actionType":{ + "shape":"ActionType", + "documentation":"

The type of action you can take by adopting the recommendation.

" + }, "rollbackPossible":{ "shape":"Boolean", "documentation":"

Whether or not implementing the recommendation can be rolled back.

" }, - "source":{ - "shape":"Source", - "documentation":"

The source of the recommendation.

" + "currentResourceDetails":{ + "shape":"ResourceDetails", + "documentation":"

The details for the resource.

" + }, + "recommendedResourceDetails":{ + "shape":"ResourceDetails", + "documentation":"

The details about the recommended resource.

" }, "tags":{ "shape":"TagList", @@ -842,35 +854,35 @@ "ListEnrollmentStatusesRequest":{ "type":"structure", "members":{ - "accountId":{ - "shape":"AccountId", - "documentation":"

The account ID of a member account in the organization.

" - }, "includeOrganizationInfo":{ "shape":"PrimitiveBoolean", "documentation":"

Indicates whether to return the enrollment status for the organization.

" }, - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of objects that are returned for the request.

" + "accountId":{ + "shape":"AccountId", + "documentation":"

The account ID of a member account in the organization.

" }, "nextToken":{ "shape":"String", "documentation":"

The token to retrieve the next set of results.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of objects that are returned for the request.

" } } }, "ListEnrollmentStatusesResponse":{ "type":"structure", "members":{ - "includeMemberAccounts":{ - "shape":"Boolean", - "documentation":"

The enrollment status of all member accounts in the organization if the account is the management account.

" - }, "items":{ "shape":"AccountEnrollmentStatuses", "documentation":"

The enrollment status of a specific account ID, including creation and last updated timestamps.

" }, + "includeMemberAccounts":{ + "shape":"Boolean", + "documentation":"

The enrollment status of all member accounts in the organization if the account is the management account or delegated administrator.

" + }, "nextToken":{ "shape":"String", "documentation":"

The token to retrieve the next set of results.

" @@ -888,7 +900,11 @@ }, "maxResults":{ "shape":"ListRecommendationSummariesRequestMaxResultsInteger", - "documentation":"

The maximum number of recommendations that are returned for the request.

" + "documentation":"

The maximum number of recommendations to be returned for the request.

" + }, + "metrics":{ + "shape":"SummaryMetricsList", + "documentation":"

Additional metrics to be returned for the request. The only valid value is savingsPercentage.

" }, "nextToken":{ "shape":"String", @@ -905,21 +921,25 @@ "ListRecommendationSummariesResponse":{ "type":"structure", "members":{ - "currencyCode":{ - "shape":"String", - "documentation":"

The currency code used for the recommendation.

" - }, "estimatedTotalDedupedSavings":{ "shape":"Double", "documentation":"

The total overall savings for the aggregated view.

" }, + "items":{ + "shape":"RecommendationSummariesList", + "documentation":"

A list of all savings recommendations.

" + }, "groupBy":{ "shape":"String", "documentation":"

The dimension used to group the recommendations by.

" }, - "items":{ - "shape":"RecommendationSummariesList", - "documentation":"

List of all savings recommendations.

" + "currencyCode":{ + "shape":"String", + "documentation":"

The currency code used for the recommendation.

" + }, + "metrics":{ + "shape":"SummaryMetricsResult", + "documentation":"

The results or descriptions for the additional metrics, based on whether the metrics were or were not requested.

" }, "nextToken":{ "shape":"String", @@ -934,6 +954,10 @@ "shape":"Filter", "documentation":"

The constraints that you want all returned recommendations to match.

" }, + "orderBy":{ + "shape":"OrderBy", + "documentation":"

The ordering of recommendations by a dimension.

" + }, "includeAllRecommendations":{ "shape":"PrimitiveBoolean", "documentation":"

List of all recommendations for a resource, or a single recommendation if de-duped by resourceId.

" @@ -945,10 +969,6 @@ "nextToken":{ "shape":"String", "documentation":"

The token to retrieve the next set of results.

" - }, - "orderBy":{ - "shape":"OrderBy", - "documentation":"

The ordering of recommendations by a dimension.

" } } }, @@ -1005,49 +1025,49 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "currentGeneration":{ + "service":{ "shape":"String", - "documentation":"

Determines whether the recommendation is for a current generation instance.

" + "documentation":"

The service that you want your recommendations for.

" }, - "instanceType":{ + "normalizedUnitsToPurchase":{ "shape":"String", - "documentation":"

The type of instance that Amazon Web Services recommends.

" + "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" }, - "monthlyRecurringCost":{ + "term":{ "shape":"String", - "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" + "documentation":"

The reserved instances recommendation term in years.

" }, - "normalizedUnitsToPurchase":{ + "paymentOption":{ "shape":"String", - "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" + "documentation":"

The payment option for the commitment.

" }, "numberOfInstancesToPurchase":{ "shape":"String", "documentation":"

The number of instances that Amazon Web Services recommends that you purchase.

" }, - "paymentOption":{ + "instanceType":{ "shape":"String", - "documentation":"

The payment option for the commitment.

" + "documentation":"

The type of instance that Amazon Web Services recommends.

" }, "reservedInstancesRegion":{ "shape":"String", "documentation":"

The Amazon Web Services Region of the commitment.

" }, - "service":{ + "currentGeneration":{ "shape":"String", - "documentation":"

The service that you want your recommendations for.

" + "documentation":"

Determines whether the recommendation is for a current generation instance.

" }, "sizeFlexEligible":{ "shape":"Boolean", "documentation":"

Determines whether the recommendation is size flexible.

" }, - "term":{ - "shape":"String", - "documentation":"

The reserved instances recommendation term in years.

" - }, "upfrontCost":{ "shape":"String", "documentation":"

How much purchasing this instance costs you upfront.

" + }, + "monthlyRecurringCost":{ + "shape":"String", + "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" } }, "documentation":"

The OpenSearch reserved instances configuration used for recommendations.

" @@ -1074,6 +1094,60 @@ "documentation":"

Defines how rows will be sorted in the response.

" }, "PrimitiveBoolean":{"type":"boolean"}, + "RdsDbInstance":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"RdsDbInstanceConfiguration", + "documentation":"

The Amazon RDS DB instance configuration used for recommendations.

" + }, + "costCalculation":{"shape":"ResourceCostCalculation"} + }, + "documentation":"

Contains the details of an Amazon RDS DB instance.

" + }, + "RdsDbInstanceConfiguration":{ + "type":"structure", + "members":{ + "instance":{ + "shape":"DbInstanceConfiguration", + "documentation":"

Details about the instance configuration.

" + } + }, + "documentation":"

The Amazon RDS DB instance configuration used for recommendations.

" + }, + "RdsDbInstanceStorage":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"RdsDbInstanceStorageConfiguration", + "documentation":"

The Amazon RDS DB instance storage configuration used for recommendations.

" + }, + "costCalculation":{"shape":"ResourceCostCalculation"} + }, + "documentation":"

Contains the details of an Amazon RDS DB instance storage.

" + }, + "RdsDbInstanceStorageConfiguration":{ + "type":"structure", + "members":{ + "storageType":{ + "shape":"String", + "documentation":"

The storage type to associate with the DB instance.

" + }, + "allocatedStorageInGb":{ + "shape":"Double", + "documentation":"

The new amount of storage in GB to allocate for the DB instance.

" + }, + "iops":{ + "shape":"Double", + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

" + }, + "storageThroughput":{ + "shape":"Double", + "documentation":"

The storage throughput for the DB instance.

" + } + }, + "documentation":"

The Amazon RDS DB instance storage configuration used for recommendations.

" + }, "RdsReservedInstances":{ "type":"structure", "members":{ @@ -1095,21 +1169,25 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "currentGeneration":{ + "service":{ "shape":"String", - "documentation":"

Determines whether the recommendation is for a current generation instance.

" + "documentation":"

The service that you want your recommendations for.

" }, - "databaseEdition":{ + "normalizedUnitsToPurchase":{ "shape":"String", - "documentation":"

The database edition that the recommended reservation supports.

" + "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" }, - "databaseEngine":{ + "term":{ "shape":"String", - "documentation":"

The database engine that the recommended reservation supports.

" + "documentation":"

The reserved instances recommendation term in years.

" }, - "deploymentOption":{ + "paymentOption":{ "shape":"String", - "documentation":"

Determines whether the recommendation is for a reservation in a single Availability Zone or a reservation with a backup in a second Availability Zone.

" + "documentation":"

The payment option for the commitment.

" + }, + "numberOfInstancesToPurchase":{ + "shape":"String", + "documentation":"

The number of instances that Amazon Web Services recommends that you purchase.

" }, "instanceFamily":{ "shape":"String", @@ -1119,45 +1197,41 @@ "shape":"String", "documentation":"

The type of instance that Amazon Web Services recommends.

" }, - "licenseModel":{ + "reservedInstancesRegion":{ "shape":"String", - "documentation":"

The license model that the recommended reservation supports.

" + "documentation":"

The Amazon Web Services Region of the commitment.

" }, - "monthlyRecurringCost":{ - "shape":"String", - "documentation":"

How much purchasing this instance costs you on a monthly basis.

" + "sizeFlexEligible":{ + "shape":"Boolean", + "documentation":"

Determines whether the recommendation is size flexible.

" }, - "normalizedUnitsToPurchase":{ + "currentGeneration":{ "shape":"String", - "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" + "documentation":"

Determines whether the recommendation is for a current generation instance.

" }, - "numberOfInstancesToPurchase":{ + "upfrontCost":{ "shape":"String", - "documentation":"

The number of instances that Amazon Web Services recommends that you purchase.

" + "documentation":"

How much purchasing this instance costs you upfront.

" }, - "paymentOption":{ + "monthlyRecurringCost":{ "shape":"String", - "documentation":"

The payment option for the commitment.

" + "documentation":"

How much purchasing this instance costs you on a monthly basis.

" }, - "reservedInstancesRegion":{ + "licenseModel":{ "shape":"String", - "documentation":"

The Amazon Web Services Region of the commitment.

" + "documentation":"

The license model that the recommended reservation supports.

" }, - "service":{ + "databaseEdition":{ "shape":"String", - "documentation":"

The service that you want your recommendations for.

" - }, - "sizeFlexEligible":{ - "shape":"Boolean", - "documentation":"

Determines whether the recommendation is size flexible.

" + "documentation":"

The database edition that the recommended reservation supports.

" }, - "term":{ + "databaseEngine":{ "shape":"String", - "documentation":"

The reserved instances recommendation term in years.

" + "documentation":"

The database engine that the recommended reservation supports.

" }, - "upfrontCost":{ + "deploymentOption":{ "shape":"String", - "documentation":"

How much purchasing this instance costs you upfront.

" + "documentation":"

Determines whether the recommendation is for a reservation in a single Availability Zone or a reservation with a backup in a second Availability Zone.

" } }, "documentation":"

The RDS reserved instances configuration used for recommendations.

" @@ -1165,82 +1239,82 @@ "Recommendation":{ "type":"structure", "members":{ + "recommendationId":{ + "shape":"String", + "documentation":"

The ID for the recommendation.

" + }, "accountId":{ "shape":"String", "documentation":"

The account that the recommendation is for.

" }, - "actionType":{ + "region":{ "shape":"String", - "documentation":"

The type of tasks that can be carried out by this action.

" + "documentation":"

The Amazon Web Services Region of the resource.

" }, - "currencyCode":{ + "resourceId":{ "shape":"String", - "documentation":"

The currency code used for the recommendation.

" + "documentation":"

The resource ID for the recommendation.

" }, - "currentResourceSummary":{ + "resourceArn":{ "shape":"String", - "documentation":"

Describes the current resource.

" + "documentation":"

The Amazon Resource Name (ARN) for the recommendation.

" }, "currentResourceType":{ "shape":"String", - "documentation":"

The current resource type.

" - }, - "estimatedMonthlyCost":{ - "shape":"Double", - "documentation":"

The estimated monthly cost for the recommendation.

" - }, - "estimatedMonthlySavings":{ - "shape":"Double", - "documentation":"

The estimated monthly savings amount for the recommendation.

" - }, - "estimatedSavingsPercentage":{ - "shape":"Double", - "documentation":"

The estimated savings percentage relative to the total cost over the cost calculation lookback period.

" - }, - "implementationEffort":{ - "shape":"String", - "documentation":"

The effort required to implement the recommendation.

" - }, - "lastRefreshTimestamp":{ - "shape":"Datetime", - "documentation":"

The time when the recommendation was last generated.

" - }, - "recommendationId":{ - "shape":"String", - "documentation":"

The ID for the recommendation.

" - }, - "recommendationLookbackPeriodInDays":{ - "shape":"Integer", - "documentation":"

The lookback period that's used to generate the recommendation.

" - }, - "recommendedResourceSummary":{ - "shape":"String", - "documentation":"

Describes the recommended resource.

" + "documentation":"

The current resource type.

" }, "recommendedResourceType":{ "shape":"String", "documentation":"

The recommended resource type.

" }, - "region":{ - "shape":"String", - "documentation":"

The Amazon Web Services Region of the resource.

" + "estimatedMonthlySavings":{ + "shape":"Double", + "documentation":"

The estimated monthly savings amount for the recommendation.

" }, - "resourceArn":{ + "estimatedSavingsPercentage":{ + "shape":"Double", + "documentation":"

The estimated savings percentage relative to the total cost over the cost calculation lookback period.

" + }, + "estimatedMonthlyCost":{ + "shape":"Double", + "documentation":"

The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage.

" + }, + "currencyCode":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the recommendation.

" + "documentation":"

The currency code used for the recommendation.

" }, - "resourceId":{ + "implementationEffort":{ "shape":"String", - "documentation":"

The resource ID for the recommendation.

" + "documentation":"

The effort required to implement the recommendation.

" }, "restartNeeded":{ "shape":"Boolean", "documentation":"

Whether or not implementing the recommendation requires a restart.

" }, + "actionType":{ + "shape":"String", + "documentation":"

The type of tasks that can be carried out by this action.

" + }, "rollbackPossible":{ "shape":"Boolean", "documentation":"

Whether or not implementing the recommendation can be rolled back.

" }, + "currentResourceSummary":{ + "shape":"String", + "documentation":"

Describes the current resource.

" + }, + "recommendedResourceSummary":{ + "shape":"String", + "documentation":"

Describes the recommended resource.

" + }, + "lastRefreshTimestamp":{ + "shape":"Datetime", + "documentation":"

The time when the recommendation was last generated.

" + }, + "recommendationLookbackPeriodInDays":{ + "shape":"Integer", + "documentation":"

The lookback period that's used to generate the recommendation.

" + }, "source":{ "shape":"Source", "documentation":"

The source of the recommendation.

" @@ -1269,14 +1343,14 @@ "RecommendationSummary":{ "type":"structure", "members":{ - "estimatedMonthlySavings":{ - "shape":"Double", - "documentation":"

The estimated total savings resulting from modifications, on a monthly basis.

" - }, "group":{ "shape":"String", "documentation":"

The grouping of recommendations.

" }, + "estimatedMonthlySavings":{ + "shape":"Double", + "documentation":"

The estimated total savings resulting from modifications, on a monthly basis.

" + }, "recommendationCount":{ "shape":"Integer", "documentation":"

The total number of instance recommendations.

" @@ -1305,53 +1379,53 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "currentGeneration":{ - "shape":"String", - "documentation":"

Determines whether the recommendation is for a current generation instance.

" - }, - "instanceFamily":{ + "service":{ "shape":"String", - "documentation":"

The instance family of the recommended reservation.

" + "documentation":"

The service that you want your recommendations for.

" }, - "instanceType":{ + "normalizedUnitsToPurchase":{ "shape":"String", - "documentation":"

The type of instance that Amazon Web Services recommends.

" + "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" }, - "monthlyRecurringCost":{ + "term":{ "shape":"String", - "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" + "documentation":"

The reserved instances recommendation term in years.

" }, - "normalizedUnitsToPurchase":{ + "paymentOption":{ "shape":"String", - "documentation":"

The number of normalized units that Amazon Web Services recommends that you purchase.

" + "documentation":"

The payment option for the commitment.

" }, "numberOfInstancesToPurchase":{ "shape":"String", "documentation":"

The number of instances that Amazon Web Services recommends that you purchase.

" }, - "paymentOption":{ + "instanceFamily":{ "shape":"String", - "documentation":"

The payment option for the commitment.

" + "documentation":"

The instance family of the recommended reservation.

" }, - "reservedInstancesRegion":{ + "instanceType":{ "shape":"String", - "documentation":"

The Amazon Web Services Region of the commitment.

" + "documentation":"

The type of instance that Amazon Web Services recommends.

" }, - "service":{ + "reservedInstancesRegion":{ "shape":"String", - "documentation":"

The service that you want your recommendations for.

" + "documentation":"

The Amazon Web Services Region of the commitment.

" }, "sizeFlexEligible":{ "shape":"Boolean", "documentation":"

Determines whether the recommendation is size flexible.

" }, - "term":{ + "currentGeneration":{ "shape":"String", - "documentation":"

The reserved instances recommendation term in years.

" + "documentation":"

Determines whether the recommendation is for a current generation instance.

" }, "upfrontCost":{ "shape":"String", "documentation":"

How much purchasing this instance costs you upfront.

" + }, + "monthlyRecurringCost":{ + "shape":"String", + "documentation":"

How much purchasing reserved instances costs you on a monthly basis.

" } }, "documentation":"

The Redshift reserved instances configuration used for recommendations.

" @@ -1375,10 +1449,6 @@ "ReservedInstancesPricing":{ "type":"structure", "members":{ - "estimatedMonthlyAmortizedReservationCost":{ - "shape":"Double", - "documentation":"

The estimated cost of your recurring monthly fees for the recommended reserved instance across the month.

" - }, "estimatedOnDemandCost":{ "shape":"Double", "documentation":"

The remaining On-Demand cost estimated to not be covered by the recommended reserved instance, over the length of the lookback period.

" @@ -1390,6 +1460,10 @@ "savingsPercentage":{ "shape":"Double", "documentation":"

The savings percentage relative to the total On-Demand costs that are associated with this instance.

" + }, + "estimatedMonthlyAmortizedReservationCost":{ + "shape":"Double", + "documentation":"

The estimated cost of your recurring monthly fees for the recommended reserved instance across the month.

" } }, "documentation":"

Pricing details for your recommended reserved instance.

" @@ -1403,13 +1477,13 @@ "ResourceCostCalculation":{ "type":"structure", "members":{ - "pricing":{ - "shape":"ResourcePricing", - "documentation":"

Pricing details of the resource recommendation.

" - }, "usages":{ "shape":"UsageList", "documentation":"

Usage details of the resource recommendation.

" + }, + "pricing":{ + "shape":"ResourcePricing", + "documentation":"

Pricing details of the resource recommendation.

" } }, "documentation":"

Cost impact of the resource recommendation.

" @@ -1417,9 +1491,17 @@ "ResourceDetails":{ "type":"structure", "members":{ - "computeSavingsPlans":{ - "shape":"ComputeSavingsPlans", - "documentation":"

The Compute Savings Plans recommendation details.

" + "lambdaFunction":{ + "shape":"LambdaFunction", + "documentation":"

The Lambda function recommendation details.

" + }, + "ecsService":{ + "shape":"EcsService", + "documentation":"

The ECS service recommendation details.

" + }, + "ec2Instance":{ + "shape":"Ec2Instance", + "documentation":"

The EC2 instance recommendation details.

" }, "ebsVolume":{ "shape":"EbsVolume", @@ -1429,45 +1511,45 @@ "shape":"Ec2AutoScalingGroup", "documentation":"

The EC2 Auto Scaling group recommendation details.

" }, - "ec2Instance":{ - "shape":"Ec2Instance", - "documentation":"

The EC2 instance recommendation details.

" - }, - "ec2InstanceSavingsPlans":{ - "shape":"Ec2InstanceSavingsPlans", - "documentation":"

The EC2 instance Savings Plans recommendation details.

" - }, "ec2ReservedInstances":{ "shape":"Ec2ReservedInstances", "documentation":"

The EC2 reserved instances recommendation details.

" }, - "ecsService":{ - "shape":"EcsService", - "documentation":"

The ECS service recommendation details.

" + "rdsReservedInstances":{ + "shape":"RdsReservedInstances", + "documentation":"

The RDS reserved instances recommendation details.

" }, "elastiCacheReservedInstances":{ "shape":"ElastiCacheReservedInstances", "documentation":"

The ElastiCache reserved instances recommendation details.

" }, - "lambdaFunction":{ - "shape":"LambdaFunction", - "documentation":"

The Lambda function recommendation details.

" - }, "openSearchReservedInstances":{ "shape":"OpenSearchReservedInstances", "documentation":"

The OpenSearch reserved instances recommendation details.

" }, - "rdsReservedInstances":{ - "shape":"RdsReservedInstances", - "documentation":"

The RDS reserved instances recommendation details.

" - }, "redshiftReservedInstances":{ "shape":"RedshiftReservedInstances", "documentation":"

The Redshift reserved instances recommendation details.

" }, + "ec2InstanceSavingsPlans":{ + "shape":"Ec2InstanceSavingsPlans", + "documentation":"

The EC2 instance Savings Plans recommendation details.

" + }, + "computeSavingsPlans":{ + "shape":"ComputeSavingsPlans", + "documentation":"

The Compute Savings Plans recommendation details.

" + }, "sageMakerSavingsPlans":{ "shape":"SageMakerSavingsPlans", "documentation":"

The SageMaker Savings Plans recommendation details.

" + }, + "rdsDbInstance":{ + "shape":"RdsDbInstance", + "documentation":"

The DB instance recommendation details.

" + }, + "rdsDbInstanceStorage":{ + "shape":"RdsDbInstanceStorage", + "documentation":"

The DB instance storage recommendation details.

" } }, "documentation":"

Contains detailed information about the specified resource.

", @@ -1498,21 +1580,21 @@ "ResourcePricing":{ "type":"structure", "members":{ - "estimatedCostAfterDiscounts":{ - "shape":"Double", - "documentation":"

The savings estimate incorporating all discounts with Amazon Web Services, such as Reserved Instances and Savings Plans.

" - }, "estimatedCostBeforeDiscounts":{ "shape":"Double", "documentation":"

The savings estimate using Amazon Web Services public pricing without incorporating any discounts.

" }, + "estimatedNetUnusedAmortizedCommitments":{ + "shape":"Double", + "documentation":"

The estimated net unused amortized commitment for the recommendation.

" + }, "estimatedDiscounts":{ "shape":"EstimatedDiscounts", "documentation":"

The estimated discounts for a recommendation.

" }, - "estimatedNetUnusedAmortizedCommitments":{ + "estimatedCostAfterDiscounts":{ "shape":"Double", - "documentation":"

The estimated net unused amortized commitment for the recommendation.

" + "documentation":"

The savings estimate incorporating all discounts with Amazon Web Services, such as Reserved Instances and Savings Plans.

" } }, "documentation":"

Contains pricing information about the specified resource.

" @@ -1532,7 +1614,9 @@ "RdsReservedInstances", "OpenSearchReservedInstances", "RedshiftReservedInstances", - "ElastiCacheReservedInstances" + "ElastiCacheReservedInstances", + "RdsDbInstanceStorage", + "RdsDbInstance" ] }, "ResourceTypeList":{ @@ -1562,17 +1646,17 @@ "shape":"String", "documentation":"

The account scope that you want your recommendations for.

" }, - "hourlyCommitment":{ + "term":{ "shape":"String", - "documentation":"

The hourly commitment for the Savings Plans type.

" + "documentation":"

The Savings Plans recommendation term in years.

" }, "paymentOption":{ "shape":"String", "documentation":"

The payment option for the commitment.

" }, - "term":{ + "hourlyCommitment":{ "shape":"String", - "documentation":"

The Savings Plans recommendation term in years.

" + "documentation":"

The hourly commitment for the Savings Plans type.

" } }, "documentation":"

The SageMaker Savings Plans configuration used for recommendations.

" @@ -1597,21 +1681,21 @@ "SavingsPlansPricing":{ "type":"structure", "members":{ - "estimatedMonthlyCommitment":{ - "shape":"Double", - "documentation":"

Estimated monthly commitment for the Savings Plan.

" - }, - "estimatedOnDemandCost":{ - "shape":"Double", - "documentation":"

Estimated On-Demand cost you will pay after buying the Savings Plan.

" - }, "monthlySavingsPlansEligibleCost":{ "shape":"Double", "documentation":"

The cost of paying for the recommended Savings Plan monthly.

" }, + "estimatedMonthlyCommitment":{ + "shape":"Double", + "documentation":"

Estimated monthly commitment for the Savings Plan.

" + }, "savingsPercentage":{ "shape":"Double", "documentation":"

Estimated savings as a percentage of your overall costs after buying the Savings Plan.

" + }, + "estimatedOnDemandCost":{ + "shape":"Double", + "documentation":"

Estimated On-Demand cost you will pay after buying the Savings Plan.

" } }, "documentation":"

Pricing information about a Savings Plan.

" @@ -1626,18 +1710,38 @@ "StorageConfiguration":{ "type":"structure", "members":{ - "sizeInGb":{ - "shape":"Double", - "documentation":"

The storage volume.

" - }, "type":{ "shape":"String", "documentation":"

The storage type.

" + }, + "sizeInGb":{ + "shape":"Double", + "documentation":"

The storage volume.

" } }, "documentation":"

The storage configuration used for recommendations.

" }, "String":{"type":"string"}, + "SummaryMetrics":{ + "type":"string", + "enum":["SavingsPercentage"] + }, + "SummaryMetricsList":{ + "type":"list", + "member":{"shape":"SummaryMetrics"}, + "max":100, + "min":1 + }, + "SummaryMetricsResult":{ + "type":"structure", + "members":{ + "savingsPercentage":{ + "shape":"String", + "documentation":"

The savings percentage based on your Amazon Web Services spend over the past 30 days.

Savings percentage is only supported when filtering by Region, account ID, or tags.

" + } + }, + "documentation":"

The results or descriptions for the additional metrics, based on whether the metrics were or were not requested.

" + }, "Tag":{ "type":"structure", "members":{ @@ -1671,13 +1775,13 @@ "type":"structure", "required":["status"], "members":{ - "includeMemberAccounts":{ - "shape":"Boolean", - "documentation":"

Indicates whether to enroll member accounts of the organization if the account is the management account.

" - }, "status":{ "shape":"EnrollmentStatus", "documentation":"

Sets the account status.

" + }, + "includeMemberAccounts":{ + "shape":"Boolean", + "documentation":"

Indicates whether to enroll member accounts of the organization if the account is the management account or delegated administrator.

" } } }, @@ -1693,32 +1797,40 @@ "UpdatePreferencesRequest":{ "type":"structure", "members":{ - "memberAccountDiscountVisibility":{ - "shape":"MemberAccountDiscountVisibility", - "documentation":"

Sets the \"member account discount visibility\" preference.

" - }, "savingsEstimationMode":{ "shape":"SavingsEstimationMode", "documentation":"

Sets the \"savings estimation mode\" preference.

" + }, + "memberAccountDiscountVisibility":{ + "shape":"MemberAccountDiscountVisibility", + "documentation":"

Sets the \"member account discount visibility\" preference.

" } } }, "UpdatePreferencesResponse":{ "type":"structure", "members":{ - "memberAccountDiscountVisibility":{ - "shape":"MemberAccountDiscountVisibility", - "documentation":"

Shows the status of the \"member account discount visibility\" preference.

" - }, "savingsEstimationMode":{ "shape":"SavingsEstimationMode", "documentation":"

Shows the status of the \"savings estimation mode\" preference.

" + }, + "memberAccountDiscountVisibility":{ + "shape":"MemberAccountDiscountVisibility", + "documentation":"

Shows the status of the \"member account discount visibility\" preference.

" } } }, "Usage":{ "type":"structure", "members":{ + "usageType":{ + "shape":"String", + "documentation":"

The usage type.

" + }, + "usageAmount":{ + "shape":"Double", + "documentation":"

The usage amount.

" + }, "operation":{ "shape":"String", "documentation":"

The operation value.

" @@ -1730,14 +1842,6 @@ "unit":{ "shape":"String", "documentation":"

The usage unit.

" - }, - "usageAmount":{ - "shape":"Double", - "documentation":"

The usage amount.

" - }, - "usageType":{ - "shape":"String", - "documentation":"

The usage type.

" } }, "documentation":"

Details about the usage.

" @@ -1750,14 +1854,14 @@ "type":"structure", "required":["message"], "members":{ - "fields":{ - "shape":"ValidationExceptionDetails", - "documentation":"

The list of fields that are invalid.

" - }, "message":{"shape":"String"}, "reason":{ "shape":"ValidationExceptionReason", "documentation":"

The reason for the validation exception.

" + }, + "fields":{ + "shape":"ValidationExceptionDetails", + "documentation":"

The list of fields that are invalid.

" } }, "documentation":"

The input fails to satisfy the constraints specified by an Amazon Web Services service.

", diff --git a/botocore/data/cost-optimization-hub/2022-07-26/waiters-2.json b/botocore/data/cost-optimization-hub/2022-07-26/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/cost-optimization-hub/2022-07-26/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/cur/2017-01-06/service-2.json b/botocore/data/cur/2017-01-06/service-2.json index 36235badd3..fdcfdaa773 100644 --- a/botocore/data/cur/2017-01-06/service-2.json +++ b/botocore/data/cur/2017-01-06/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"cur", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Cost and Usage Report Service", "serviceId":"Cost and Usage Report Service", "signatureVersion":"v4", "signingName":"cur", "targetPrefix":"AWSOrigamiServiceGatewayService", - "uid":"cur-2017-01-06" + "uid":"cur-2017-01-06", + "auth":["aws.auth#sigv4"] }, "operations":{ "DeleteReportDefinition":{ diff --git a/botocore/data/customer-profiles/2020-08-15/service-2.json b/botocore/data/customer-profiles/2020-08-15/service-2.json index f555553333..eabe1e22f2 100644 --- a/botocore/data/customer-profiles/2020-08-15/service-2.json +++ b/botocore/data/customer-profiles/2020-08-15/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"profile", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Customer Profiles", "serviceFullName":"Amazon Connect Customer Profiles", "serviceId":"Customer Profiles", "signatureVersion":"v4", "signingName":"profile", - "uid":"customer-profiles-2020-08-15" + "uid":"customer-profiles-2020-08-15", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddProfileKey":{ @@ -62,7 +64,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

" + "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

It is not possible to associate a Customer Profiles domain with an Amazon Connect Instance directly from the API. If you would like to create a domain and associate a Customer Profiles domain, use the Amazon Connect admin website. For more information, see Enable Customer Profiles.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

" }, "CreateEventStream":{ "name":"CreateEventStream", @@ -1645,7 +1647,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

A unique account number that you have given to the customer.

" + "documentation":"

An account number that you have given to the customer.

" }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -2946,6 +2948,14 @@ "shape":"string1To255", "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up.

" }, + "MaxAvailableProfileObjectCount":{ + "shape":"minSize0", + "documentation":"

The amount of provisioned profile object max count available.

" + }, + "MaxProfileObjectCount":{ + "shape":"minSize1", + "documentation":"

The amount of profile object max count assigned to the object type.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -3770,6 +3780,14 @@ "shape":"timestamp", "documentation":"

The timestamp of when the domain was most recently edited.

" }, + "MaxProfileObjectCount":{ + "shape":"minSize1", + "documentation":"

The amount of profile object max count assigned to the object type.

" + }, + "MaxAvailableProfileObjectCount":{ + "shape":"minSize0", + "documentation":"

The amount of provisioned profile object max count available.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource.

" @@ -4403,7 +4421,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

A unique account number that you have given to the customer.

" + "documentation":"

An account number that you have given to the customer.

" }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -4688,6 +4706,10 @@ "shape":"string1To255", "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up.

" }, + "MaxProfileObjectCount":{ + "shape":"minSize1", + "documentation":"

The amount of profile object max count assigned to the object type.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -4737,6 +4759,14 @@ "shape":"string1To255", "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up in fields that were parsed using SimpleDateFormat. If you have sourceLastUpdatedTimestamp in your field, you must set up sourceLastUpdatedTimestampFormat.

" }, + "MaxProfileObjectCount":{ + "shape":"minSize1", + "documentation":"

The amount of profile object max count assigned to the object type.

" + }, + "MaxAvailableProfileObjectCount":{ + "shape":"minSize0", + "documentation":"

The amount of provisioned profile object max count available.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -5658,7 +5688,7 @@ }, "AccountNumber":{ "shape":"sensitiveString0To255", - "documentation":"

A unique account number that you have given to the customer.

" + "documentation":"

An account number that you have given to the customer.

" }, "PartyType":{ "shape":"PartyType", @@ -5886,6 +5916,14 @@ "min":1 }, "message":{"type":"string"}, + "minSize0":{ + "type":"integer", + "min":0 + }, + "minSize1":{ + "type":"integer", + "min":1 + }, "optionalBoolean":{"type":"boolean"}, "requestValueList":{ "type":"list", @@ -5992,5 +6030,5 @@ "pattern":"[a-f0-9]{32}" } }, - "documentation":"Amazon Connect Customer Profiles

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. If you're new to Amazon Connect, you might find it helpful to review the Amazon Connect Administrator Guide.

" + "documentation":"Amazon Connect Customer Profiles

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

For more information about the Amazon Connect Customer Profiles feature, see Use Customer Profiles in the Amazon Connect Administrator's Guide.

" } diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 59190377ae..f441250f3f 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -4707,6 +4707,7 @@ "type":"string", "enum":[ "QUEUED", + "CANCELLING", "LAUNCHING", "PREPARING", "TRANSFERRING", diff --git a/botocore/data/datazone/2018-05-10/paginators-1.json b/botocore/data/datazone/2018-05-10/paginators-1.json index 3fa9a5b0cc..5fbd673fd3 100644 --- a/botocore/data/datazone/2018-05-10/paginators-1.json +++ b/botocore/data/datazone/2018-05-10/paginators-1.json @@ -137,6 +137,30 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "items" + }, + "ListEnvironmentActions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListLineageNodeHistory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "nodes" + }, + "ListAssetFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListDataProductRevisions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" } } } diff --git a/botocore/data/datazone/2018-05-10/service-2.json b/botocore/data/datazone/2018-05-10/service-2.json index 6f78ac31e0..16d713bf08 100644 --- a/botocore/data/datazone/2018-05-10/service-2.json +++ b/botocore/data/datazone/2018-05-10/service-2.json @@ -54,6 +54,26 @@ "documentation":"

Accepts a subscription request to a specific asset.

", "idempotent":true }, + "AssociateEnvironmentRole":{ + "name":"AssociateEnvironmentRole", + "http":{ + "method":"PUT", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}", + "responseCode":200 + }, + "input":{"shape":"AssociateEnvironmentRoleInput"}, + "output":{"shape":"AssociateEnvironmentRoleOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Associates the environment role in Amazon DataZone.

" + }, "CancelMetadataGenerationRun":{ "name":"CancelMetadataGenerationRun", "http":{ @@ -117,6 +137,28 @@ "documentation":"

Creates an asset in Amazon DataZone catalog.

", "idempotent":true }, + "CreateAssetFilter":{ + "name":"CreateAssetFilter", + "http":{ + "method":"POST", + "requestUri":"/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters", + "responseCode":201 + }, + "input":{"shape":"CreateAssetFilterInput"}, + "output":{"shape":"CreateAssetFilterOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Creates a data asset filter.

", + "idempotent":true + }, "CreateAssetRevision":{ "name":"CreateAssetRevision", "http":{ @@ -158,6 +200,49 @@ ], "documentation":"

Creates a custom asset type.

" }, + "CreateDataProduct":{ + "name":"CreateDataProduct", + "http":{ + "method":"POST", + "requestUri":"/v2/domains/{domainIdentifier}/data-products", + "responseCode":201 + }, + "input":{"shape":"CreateDataProductInput"}, + "output":{"shape":"CreateDataProductOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Creates a data product.

", + "idempotent":true + }, + "CreateDataProductRevision":{ + "name":"CreateDataProductRevision", + "http":{ + "method":"POST", + "requestUri":"/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions", + "responseCode":201 + }, + "input":{"shape":"CreateDataProductRevisionInput"}, + "output":{"shape":"CreateDataProductRevisionOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Creates a data product revision.

", + "idempotent":true + }, "CreateDataSource":{ "name":"CreateDataSource", "http":{ @@ -222,6 +307,26 @@ ], "documentation":"

Create an Amazon DataZone environment.

" }, + "CreateEnvironmentAction":{ + "name":"CreateEnvironmentAction", + "http":{ + "method":"POST", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions", + "responseCode":201 + }, + "input":{"shape":"CreateEnvironmentActionInput"}, + "output":{"shape":"CreateEnvironmentActionOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Creates an action for the environment, for example, creates a console link for an analytics tool that is available in this environment.

" + }, "CreateEnvironmentProfile":{ "name":"CreateEnvironmentProfile", "http":{ @@ -481,10 +586,31 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Deletes an asset in Amazon DataZone.

", + "idempotent":true + }, + "DeleteAssetFilter":{ + "name":"DeleteAssetFilter", + "http":{ + "method":"DELETE", + "requestUri":"/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteAssetFilterInput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Delets an asset in Amazon DataZone.

", + "documentation":"

Deletes an asset filter.

", "idempotent":true }, "DeleteAssetType":{ @@ -507,6 +633,27 @@ ], "documentation":"

Deletes an asset type in Amazon DataZone.

" }, + "DeleteDataProduct":{ + "name":"DeleteDataProduct", + "http":{ + "method":"DELETE", + "requestUri":"/v2/domains/{domainIdentifier}/data-products/{identifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteDataProductInput"}, + "output":{"shape":"DeleteDataProductOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Deletes a data product in Amazon DataZone.

", + "idempotent":true + }, "DeleteDataSource":{ "name":"DeleteDataSource", "http":{ @@ -569,6 +716,26 @@ "documentation":"

Deletes an environment in Amazon DataZone.

", "idempotent":true }, + "DeleteEnvironmentAction":{ + "name":"DeleteEnvironmentAction", + "http":{ + "method":"DELETE", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteEnvironmentActionInput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Deletes an action for the environment, for example, deletes a console link for an analytics tool that is available in this environment.

", + "idempotent":true + }, "DeleteEnvironmentBlueprintConfiguration":{ "name":"DeleteEnvironmentBlueprintConfiguration", "http":{ @@ -809,6 +976,26 @@ "documentation":"

Deletes the specified time series form for the specified asset.

", "idempotent":true }, + "DisassociateEnvironmentRole":{ + "name":"DisassociateEnvironmentRole", + "http":{ + "method":"DELETE", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}", + "responseCode":200 + }, + "input":{"shape":"DisassociateEnvironmentRoleInput"}, + "output":{"shape":"DisassociateEnvironmentRoleOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Disassociates the environment role in Amazon DataZone.

" + }, "GetAsset":{ "name":"GetAsset", "http":{ @@ -828,6 +1015,25 @@ ], "documentation":"

Gets an Amazon DataZone asset.

" }, + "GetAssetFilter":{ + "name":"GetAssetFilter", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + "responseCode":200 + }, + "input":{"shape":"GetAssetFilterInput"}, + "output":{"shape":"GetAssetFilterOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets an asset filter.

" + }, "GetAssetType":{ "name":"GetAssetType", "http":{ @@ -847,6 +1053,25 @@ ], "documentation":"

Gets an Amazon DataZone asset type.

" }, + "GetDataProduct":{ + "name":"GetDataProduct", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/data-products/{identifier}", + "responseCode":200 + }, + "input":{"shape":"GetDataProductInput"}, + "output":{"shape":"GetDataProductOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets the data product.

" + }, "GetDataSource":{ "name":"GetDataSource", "http":{ @@ -928,6 +1153,25 @@ ], "documentation":"

Gets an Amazon DataZone environment.

" }, + "GetEnvironmentAction":{ + "name":"GetEnvironmentAction", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentActionInput"}, + "output":{"shape":"GetEnvironmentActionOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets the specified environment action.

" + }, "GetEnvironmentBlueprint":{ "name":"GetEnvironmentBlueprint", "http":{ @@ -966,6 +1210,25 @@ ], "documentation":"

Gets the blueprint configuration in Amazon DataZone.

" }, + "GetEnvironmentCredentials":{ + "name":"GetEnvironmentCredentials", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/credentials", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentCredentialsInput"}, + "output":{"shape":"GetEnvironmentCredentialsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets the credentials of an environment in Amazon DataZone.

" + }, "GetEnvironmentProfile":{ "name":"GetEnvironmentProfile", "http":{ @@ -1081,6 +1344,25 @@ ], "documentation":"

Gets the data portal URL for the specified Amazon DataZone domain.

" }, + "GetLineageNode":{ + "name":"GetLineageNode", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/lineage/nodes/{identifier}", + "responseCode":200 + }, + "input":{"shape":"GetLineageNodeInput"}, + "output":{"shape":"GetLineageNodeOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets the data lineage node.

" + }, "GetListing":{ "name":"GetListing", "http":{ @@ -1098,7 +1380,7 @@ {"shape":"ValidationException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Gets a listing (a record of an asset at a given time).

" + "documentation":"

Gets a listing (a record of an asset at a given time). If you specify a listing version, only details that are specific to that version are returned.

" }, "GetMetadataGenerationRun":{ "name":"GetMetadataGenerationRun", @@ -1252,6 +1534,25 @@ ], "documentation":"

Gets a user profile in Amazon DataZone.

" }, + "ListAssetFilters":{ + "name":"ListAssetFilters", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters", + "responseCode":200 + }, + "input":{"shape":"ListAssetFiltersInput"}, + "output":{"shape":"ListAssetFiltersOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Lists asset filters.

" + }, "ListAssetRevisions":{ "name":"ListAssetRevisions", "http":{ @@ -1271,6 +1572,25 @@ ], "documentation":"

Lists the revisions for the asset.

" }, + "ListDataProductRevisions":{ + "name":"ListDataProductRevisions", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/data-products/{identifier}/revisions", + "responseCode":200 + }, + "input":{"shape":"ListDataProductRevisionsInput"}, + "output":{"shape":"ListDataProductRevisionsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Lists data product revisions.

" + }, "ListDataSourceRunActivities":{ "name":"ListDataSourceRunActivities", "http":{ @@ -1355,6 +1675,24 @@ ], "documentation":"

Lists Amazon DataZone domains.

" }, + "ListEnvironmentActions":{ + "name":"ListEnvironmentActions", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions", + "responseCode":200 + }, + "input":{"shape":"ListEnvironmentActionsInput"}, + "output":{"shape":"ListEnvironmentActionsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Lists existing environment actions.

" + }, "ListEnvironmentBlueprintConfigurations":{ "name":"ListEnvironmentBlueprintConfigurations", "http":{ @@ -1429,6 +1767,25 @@ ], "documentation":"

Lists Amazon DataZone environments.

" }, + "ListLineageNodeHistory":{ + "name":"ListLineageNodeHistory", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/lineage/nodes/{identifier}/history", + "responseCode":200 + }, + "input":{"shape":"ListLineageNodeHistoryInput"}, + "output":{"shape":"ListLineageNodeHistoryOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Lists the history of the specified data lineage node.

" + }, "ListMetadataGenerationRuns":{ "name":"ListMetadataGenerationRuns", "http":{ @@ -1618,12 +1975,34 @@ ], "documentation":"

Lists time series data points.

" }, - "PostTimeSeriesDataPoints":{ - "name":"PostTimeSeriesDataPoints", + "PostLineageEvent":{ + "name":"PostLineageEvent", "http":{ "method":"POST", - "requestUri":"/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points", - "responseCode":201 + "requestUri":"/v2/domains/{domainIdentifier}/lineage/events", + "responseCode":200 + }, + "input":{"shape":"PostLineageEventInput"}, + "output":{"shape":"PostLineageEventOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Posts a data lineage event.

", + "idempotent":true + }, + "PostTimeSeriesDataPoints":{ + "name":"PostTimeSeriesDataPoints", + "http":{ + "method":"POST", + "requestUri":"/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points", + "responseCode":201 }, "input":{"shape":"PostTimeSeriesDataPointsInput"}, "output":{"shape":"PostTimeSeriesDataPointsOutput"}, @@ -1898,6 +2277,27 @@ "documentation":"

Untags a resource in Amazon DataZone.

", "idempotent":true }, + "UpdateAssetFilter":{ + "name":"UpdateAssetFilter", + "http":{ + "method":"PATCH", + "requestUri":"/v2/domains/{domainIdentifier}/assets/{assetIdentifier}/filters/{identifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateAssetFilterInput"}, + "output":{"shape":"UpdateAssetFilterOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Updates an asset filter.

", + "idempotent":true + }, "UpdateDataSource":{ "name":"UpdateDataSource", "http":{ @@ -1962,6 +2362,26 @@ ], "documentation":"

Updates the specified environment in Amazon DataZone.

" }, + "UpdateEnvironmentAction":{ + "name":"UpdateEnvironmentAction", + "http":{ + "method":"PATCH", + "requestUri":"/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateEnvironmentActionInput"}, + "output":{"shape":"UpdateEnvironmentActionOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Updates an environment action.

" + }, "UpdateEnvironmentProfile":{ "name":"UpdateEnvironmentProfile", "http":{ @@ -2374,10 +2794,92 @@ "type":"string", "sensitive":true }, + "ActionParameters":{ + "type":"structure", + "members":{ + "awsConsoleLink":{ + "shape":"AwsConsoleLinkParameters", + "documentation":"

The console link specified as part of the environment action.

" + } + }, + "documentation":"

The parameters of the environment action.

", + "union":true + }, "ApplicableAssetTypes":{ "type":"list", "member":{"shape":"TypeName"} }, + "AssetFilterConfiguration":{ + "type":"structure", + "members":{ + "columnConfiguration":{ + "shape":"ColumnFilterConfiguration", + "documentation":"

The column configuration of the asset filter.

" + }, + "rowConfiguration":{ + "shape":"RowFilterConfiguration", + "documentation":"

The row configuration of the asset filter.

" + } + }, + "documentation":"

The configuration details of the asset filter.

", + "union":true + }, + "AssetFilterSummary":{ + "type":"structure", + "required":[ + "assetId", + "domainId", + "id", + "name" + ], + "members":{ + "assetId":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the asset filter was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the asset filter lives.

" + }, + "effectiveColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

The effective column names of the asset filter.

" + }, + "effectiveRowFilter":{ + "shape":"String", + "documentation":"

The effective row filter of the asset filter.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The error message that is displayed if the action does not succeed.

" + }, + "id":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

" + }, + "name":{ + "shape":"FilterName", + "documentation":"

The name of the asset filter.

" + }, + "status":{ + "shape":"FilterStatus", + "documentation":"

The status of the asset filter.

" + } + }, + "documentation":"

The summary of the asset filter.

" + }, + "AssetFilters":{ + "type":"list", + "member":{"shape":"AssetFilterSummary"} + }, "AssetId":{ "type":"string", "pattern":"^[a-zA-Z0-9_-]{1,36}$" @@ -2386,6 +2888,28 @@ "type":"string", "pattern":"^[a-zA-Z0-9_-]{1,36}$" }, + "AssetInDataProductListingItem":{ + "type":"structure", + "members":{ + "entityId":{ + "shape":"String", + "documentation":"

The entity ID of the listing of the asset in a data product.

" + }, + "entityRevision":{ + "shape":"String", + "documentation":"

The entity revision of the listing of the asset in a data product.

" + }, + "entityType":{ + "shape":"String", + "documentation":"

The entity type of the listing of the asset in a data product.

" + } + }, + "documentation":"

The listing of the asset in a data product.

" + }, + "AssetInDataProductListingItems":{ + "type":"list", + "member":{"shape":"AssetInDataProductListingItem"} + }, "AssetItem":{ "type":"structure", "required":[ @@ -2727,6 +3251,39 @@ }, "documentation":"

The details of the asset type.

" }, + "AssociateEnvironmentRoleInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "environmentRoleArn" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the environment role is associated.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the Amazon DataZone environment.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "environmentRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the environment role.

", + "location":"uri", + "locationName":"environmentRoleArn" + } + } + }, + "AssociateEnvironmentRoleOutput":{ + "type":"structure", + "members":{ + } + }, "Attribute":{ "type":"string", "max":128, @@ -2753,6 +3310,16 @@ "type":"string", "pattern":"^\\d{12}$" }, + "AwsConsoleLinkParameters":{ + "type":"structure", + "members":{ + "uri":{ + "shape":"String", + "documentation":"

The URI of the console link specified as part of the environment action.

" + } + }, + "documentation":"

The parameters of the console link specified as part of the environment action.

" + }, "AwsRegion":{ "type":"string", "pattern":"^[a-z]{2}-[a-z]{4,10}-\\d$" @@ -2901,6 +3468,20 @@ }, "documentation":"

Part of the provisioning properties of the environment blueprint.

" }, + "ColumnFilterConfiguration":{ + "type":"structure", + "members":{ + "includedColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

Specifies whether to include column names.

" + } + }, + "documentation":"

The column configuration of the asset filter.

" + }, + "ColumnNameList":{ + "type":"list", + "member":{"shape":"String"} + }, "ConfigurableActionParameter":{ "type":"structure", "members":{ @@ -2961,6 +3542,102 @@ }, "exception":true }, + "CreateAssetFilterInput":{ + "type":"structure", + "required":[ + "assetIdentifier", + "configuration", + "domainIdentifier", + "name" + ], + "members":{ + "assetIdentifier":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

", + "location":"uri", + "locationName":"assetIdentifier" + }, + "clientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "configuration":{ + "shape":"AssetFilterConfiguration", + "documentation":"

The configuration of the asset filter.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain in which you want to create an asset filter.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "name":{ + "shape":"FilterName", + "documentation":"

The name of the asset filter.

" + } + } + }, + "CreateAssetFilterOutput":{ + "type":"structure", + "required":[ + "assetId", + "configuration", + "domainId", + "id", + "name" + ], + "members":{ + "assetId":{ + "shape":"AssetId", + "documentation":"

The ID of the asset.

" + }, + "configuration":{ + "shape":"AssetFilterConfiguration", + "documentation":"

The configuration of the asset filter.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the asset filter was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the asset filter is created.

" + }, + "effectiveColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

The column names in the asset filter.

" + }, + "effectiveRowFilter":{ + "shape":"String", + "documentation":"

The row filter in the asset filter.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The error message that is displayed if the asset filter is not created successfully.

" + }, + "id":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

" + }, + "name":{ + "shape":"FilterName", + "documentation":"

The name of the asset filter.

" + }, + "status":{ + "shape":"FilterStatus", + "documentation":"

The status of the asset filter.

" + } + } + }, "CreateAssetInput":{ "type":"structure", "required":[ @@ -3344,52 +4021,282 @@ } } }, - "CreateDataSourceInput":{ + "CreateDataProductInput":{ "type":"structure", "required":[ "domainIdentifier", - "environmentIdentifier", "name", - "projectIdentifier", - "type" + "owningProjectIdentifier" ], "members":{ - "assetFormsInput":{ - "shape":"FormInputList", - "documentation":"

The metadata forms that are to be attached to the assets that this data source works with.

" - }, "clientToken":{ - "shape":"String", + "shape":"ClientToken", "documentation":"

A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.

", "idempotencyToken":true }, - "configuration":{ - "shape":"DataSourceConfigurationInput", - "documentation":"

Specifies the configuration of the data source. It can be set to either glueRunConfiguration or redshiftRunConfiguration.

" - }, "description":{ - "shape":"Description", - "documentation":"

The description of the data source.

" + "shape":"DataProductDescription", + "documentation":"

The description of the data product.

" }, "domainIdentifier":{ "shape":"DomainId", - "documentation":"

The ID of the Amazon DataZone domain where the data source is created.

", + "documentation":"

The ID of the domain where the data product is created.

", "location":"uri", "locationName":"domainIdentifier" }, - "enableSetting":{ - "shape":"EnableSetting", - "documentation":"

Specifies whether the data source is enabled.

" + "formsInput":{ + "shape":"FormInputList", + "documentation":"

The metadata forms of the data product.

" }, - "environmentIdentifier":{ - "shape":"String", - "documentation":"

The unique identifier of the Amazon DataZone environment to which the data source publishes assets.

" + "glossaryTerms":{ + "shape":"GlossaryTerms", + "documentation":"

The glossary terms of the data product.

" + }, + "items":{ + "shape":"DataProductItems", + "documentation":"

The data assets of the data product.

" }, "name":{ - "shape":"Name", - "documentation":"

The name of the data source.

" + "shape":"DataProductName", + "documentation":"

The name of the data product.

" }, - "projectIdentifier":{ + "owningProjectIdentifier":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the data product.

" + } + } + }, + "CreateDataProductOutput":{ + "type":"structure", + "required":[ + "domainId", + "id", + "name", + "owningProjectId", + "revision", + "status" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data product was created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data product.

" + }, + "description":{ + "shape":"DataProductDescription", + "documentation":"

The description of the data product.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data product lives.

" + }, + "firstRevisionCreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the first revision of the data product was created.

" + }, + "firstRevisionCreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the first revision of the data product.

" + }, + "formsOutput":{ + "shape":"FormOutputList", + "documentation":"

The metadata forms of the data product.

" + }, + "glossaryTerms":{ + "shape":"GlossaryTerms", + "documentation":"

The glossary terms of the data product.

" + }, + "id":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product.

" + }, + "items":{ + "shape":"DataProductItems", + "documentation":"

The data assets of the data product.

" + }, + "name":{ + "shape":"DataProductName", + "documentation":"

The name of the data product.

" + }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the data product.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data product.

" + }, + "status":{ + "shape":"DataProductStatus", + "documentation":"

The status of the data product.

" + } + } + }, + "CreateDataProductRevisionInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier", + "name" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"DataProductDescription", + "documentation":"

The description of the data product revision.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data product revision is created.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "formsInput":{ + "shape":"FormInputList", + "documentation":"

The metadata forms of the data product revision.

" + }, + "glossaryTerms":{ + "shape":"GlossaryTerms", + "documentation":"

The glossary terms of the data product revision.

" + }, + "identifier":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product revision.

", + "location":"uri", + "locationName":"identifier" + }, + "items":{ + "shape":"DataProductItems", + "documentation":"

The data assets of the data product revision.

" + }, + "name":{ + "shape":"DataProductName", + "documentation":"

The name of the data product revision.

" + } + } + }, + "CreateDataProductRevisionOutput":{ + "type":"structure", + "required":[ + "domainId", + "id", + "name", + "owningProjectId", + "revision", + "status" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data product revision is created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data product revision.

" + }, + "description":{ + "shape":"DataProductDescription", + "documentation":"

The description of the data product revision.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where data product revision is created.

" + }, + "firstRevisionCreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the first revision of the data product is created.

" + }, + "firstRevisionCreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the first revision of the data product.

" + }, + "formsOutput":{ + "shape":"FormOutputList", + "documentation":"

The metadata forms of the data product revision.

" + }, + "glossaryTerms":{ + "shape":"GlossaryTerms", + "documentation":"

The glossary terms of the data product revision.

" + }, + "id":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product revision.

" + }, + "items":{ + "shape":"DataProductItems", + "documentation":"

The data assets of the data product revision.

" + }, + "name":{ + "shape":"DataProductName", + "documentation":"

The name of the data product revision.

" + }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the data product revision.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data product revision.

" + }, + "status":{ + "shape":"DataProductStatus", + "documentation":"

The status of the data product revision.

" + } + } + }, + "CreateDataSourceInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "name", + "projectIdentifier", + "type" + ], + "members":{ + "assetFormsInput":{ + "shape":"FormInputList", + "documentation":"

The metadata forms that are to be attached to the assets that this data source works with.

" + }, + "clientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "configuration":{ + "shape":"DataSourceConfigurationInput", + "documentation":"

Specifies the configuration of the data source. It can be set to either glueRunConfiguration or redshiftRunConfiguration.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the data source.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain where the data source is created.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "enableSetting":{ + "shape":"EnableSetting", + "documentation":"

Specifies whether the data source is enabled.

" + }, + "environmentIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of the Amazon DataZone environment to which the data source publishes assets.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the data source.

" + }, + "projectIdentifier":{ "shape":"String", "documentation":"

The identifier of the Amazon DataZone project in which you want to add this data source.

" }, @@ -3587,6 +4494,77 @@ } } }, + "CreateEnvironmentActionInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "name", + "parameters" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The description of the environment action that is being created in the environment.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the environment action is created.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment in which the environment action is created.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + } + }, + "CreateEnvironmentActionOutput":{ + "type":"structure", + "required":[ + "domainId", + "environmentId", + "id", + "name", + "parameters" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The description of the environment action.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain in which the environment action is created.

" + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment in which the environment action is created.

" + }, + "id":{ + "shape":"EnvironmentActionId", + "documentation":"

The ID of the environment action.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + } + }, "CreateEnvironmentInput":{ "type":"structure", "required":[ @@ -3606,6 +4584,18 @@ "location":"uri", "locationName":"domainIdentifier" }, + "environmentAccountIdentifier":{ + "shape":"String", + "documentation":"

The ID of the account in which the environment is being created.

" + }, + "environmentAccountRegion":{ + "shape":"String", + "documentation":"

The region of the account in which the environment is being created.

" + }, + "environmentBlueprintIdentifier":{ + "shape":"String", + "documentation":"

The ID of the blueprint with which the environment is being created.

" + }, "environmentProfileIdentifier":{ "shape":"EnvironmentProfileId", "documentation":"

The identifier of the environment profile that is used to create this Amazon DataZone environment.

" @@ -3633,7 +4623,6 @@ "required":[ "createdBy", "domainId", - "environmentProfileId", "name", "projectId", "provider" @@ -4366,7 +5355,9 @@ }, "subscriptionId":{ "shape":"SubscriptionId", - "documentation":"

The identifier of the subscription grant.

" + "documentation":"

The identifier of the subscription grant.

", + "deprecated":true, + "deprecatedMessage":"Multiple subscriptions can exist for a single grant" }, "subscriptionTargetId":{ "shape":"SubscriptionTargetId", @@ -4759,25 +5750,142 @@ "type":"string", "pattern":"^[a-zA-Z0-9_-]{1,36}$" }, - "DataProductItem":{ + "DataProductItem":{ + "type":"structure", + "required":[ + "identifier", + "itemType" + ], + "members":{ + "glossaryTerms":{ + "shape":"ItemGlossaryTerms", + "documentation":"

The glossary terms of the data product.

" + }, + "identifier":{ + "shape":"EntityIdentifier", + "documentation":"

The ID of the data product.

" + }, + "itemType":{ + "shape":"DataProductItemType", + "documentation":"

The type of the data product.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data product.

" + } + }, + "documentation":"

The data product.

" + }, + "DataProductItemType":{ + "type":"string", + "enum":["ASSET"] + }, + "DataProductItems":{ + "type":"list", + "member":{"shape":"DataProductItem"}, + "min":1 + }, + "DataProductListing":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data product listing was created.

" + }, + "dataProductId":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product listing.

" + }, + "dataProductRevision":{ + "shape":"Revision", + "documentation":"

The revision of the data product listing.

" + }, + "forms":{ + "shape":"Forms", + "documentation":"

The metadata forms of the data product listing.

" + }, + "glossaryTerms":{ + "shape":"DetailedGlossaryTerms", + "documentation":"

The glossary terms of the data product listing.

" + }, + "items":{ + "shape":"ListingSummaries", + "documentation":"

The data assets of the data product listing.

" + }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the data product listing.

" + } + }, + "documentation":"

The data product listing.

" + }, + "DataProductListingItem":{ "type":"structure", "members":{ - "domainId":{ - "shape":"DomainId", - "documentation":"

" + "additionalAttributes":{ + "shape":"DataProductListingItemAdditionalAttributes", + "documentation":"

The additional attributes of the asset of the data product.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the asset of the data product listing was created.

" }, - "itemId":{ + "description":{ + "shape":"Description", + "documentation":"

The description of the asset of the data product.

" + }, + "entityId":{ "shape":"DataProductId", - "documentation":"

" + "documentation":"

The entity ID of the asset of the data product.

" + }, + "entityRevision":{ + "shape":"Revision", + "documentation":"

The revision of the asset of the data product.

" + }, + "glossaryTerms":{ + "shape":"DetailedGlossaryTerms", + "documentation":"

The glossary terms of the asset of the data product.

" + }, + "items":{ + "shape":"ListingSummaryItems", + "documentation":"

The data of the asset of the data product.

" + }, + "listingCreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the listing.

" + }, + "listingId":{ + "shape":"ListingId", + "documentation":"

The ID of the listing.

" + }, + "listingRevision":{ + "shape":"Revision", + "documentation":"

The revision of the listing.

" + }, + "listingUpdatedBy":{ + "shape":"UpdatedBy", + "documentation":"

The user who updated the listing.

" + }, + "name":{ + "shape":"DataProductName", + "documentation":"

The name of the asset of the data product.

" + }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the asset of the data product.

" } }, - "documentation":"

" + "documentation":"

The asset of the data product listing.

" }, - "DataProductItems":{ - "type":"list", - "member":{"shape":"DataProductItem"}, - "max":100, - "min":0 + "DataProductListingItemAdditionalAttributes":{ + "type":"structure", + "members":{ + "forms":{ + "shape":"Forms", + "documentation":"

The metadata forms of the asset of the data product.

" + } + }, + "documentation":"

The additional attributes of the asset of the data product.

" }, "DataProductName":{ "type":"string", @@ -4785,7 +5893,7 @@ "min":1, "sensitive":true }, - "DataProductSummary":{ + "DataProductResultItem":{ "type":"structure", "required":[ "domainId", @@ -4796,50 +5904,84 @@ "members":{ "createdAt":{ "shape":"CreatedAt", - "documentation":"

" + "documentation":"

The timestamp at which the data product was created.

" }, "createdBy":{ "shape":"CreatedBy", - "documentation":"

" - }, - "dataProductItems":{ - "shape":"DataProductItems", - "documentation":"

" + "documentation":"

The user who created the data product.

" }, "description":{ "shape":"DataProductDescription", - "documentation":"

" + "documentation":"

The description of the data product.

" }, "domainId":{ "shape":"DomainId", - "documentation":"

" + "documentation":"

The ID of the domain where the data product lives.

" + }, + "firstRevisionCreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the first revision of the data product was created.

" + }, + "firstRevisionCreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the first revision of the data product.

" }, "glossaryTerms":{ "shape":"GlossaryTerms", - "documentation":"

" + "documentation":"

The glossary terms of the data product.

" }, "id":{ "shape":"DataProductId", - "documentation":"

" + "documentation":"

The ID of the data product.

" }, "name":{ "shape":"DataProductName", - "documentation":"

" + "documentation":"

The name of the data product.

" }, "owningProjectId":{ "shape":"ProjectId", - "documentation":"

" + "documentation":"

The ID of the owning project of the data product.

" + } + }, + "documentation":"

The data product.

" + }, + "DataProductRevision":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data product revision was created.

" }, - "updatedAt":{ - "shape":"UpdatedAt", - "documentation":"

" + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data product revision.

" }, - "updatedBy":{ - "shape":"UpdatedBy", - "documentation":"

" + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data product revision lives.

" + }, + "id":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product revision.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The data product revision.

" } }, - "documentation":"

" + "documentation":"

The data product revision.

" + }, + "DataProductRevisions":{ + "type":"list", + "member":{"shape":"DataProductRevision"} + }, + "DataProductStatus":{ + "type":"string", + "enum":[ + "CREATED", + "CREATING", + "CREATE_FAILED" + ] }, "DataSourceConfigurationInput":{ "type":"structure", @@ -5132,6 +6274,34 @@ "min":1, "sensitive":true }, + "DeleteAssetFilterInput":{ + "type":"structure", + "required":[ + "assetIdentifier", + "domainIdentifier", + "identifier" + ], + "members":{ + "assetIdentifier":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

", + "location":"uri", + "locationName":"assetIdentifier" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to delete an asset filter.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter that you want to delete.

", + "location":"uri", + "locationName":"identifier" + } + } + }, "DeleteAssetInput":{ "type":"structure", "required":[ @@ -5184,6 +6354,32 @@ "members":{ } }, + "DeleteDataProductInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the data product is deleted.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"DataProductId", + "documentation":"

The identifier of the data product that is deleted.

", + "location":"uri", + "locationName":"identifier" + } + } + }, + "DeleteDataProductOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteDataSourceInput":{ "type":"structure", "required":[ @@ -5209,6 +6405,12 @@ "documentation":"

The identifier of the data source that is deleted.

", "location":"uri", "locationName":"identifier" + }, + "retainPermissionsOnRevokeFailure":{ + "shape":"Boolean", + "documentation":"

Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source.

", + "location":"querystring", + "locationName":"retainPermissionsOnRevokeFailure" } } }, @@ -5282,10 +6484,18 @@ "shape":"Boolean", "documentation":"

Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.

" }, + "retainPermissionsOnRevokeFailure":{ + "shape":"Boolean", + "documentation":"

Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source.

" + }, "schedule":{ "shape":"ScheduleConfiguration", "documentation":"

The schedule of runs for this data source.

" }, + "selfGrantStatus":{ + "shape":"SelfGrantStatusOutput", + "documentation":"

Specifies the status of the self-granting functionality.

" + }, "status":{ "shape":"DataSourceStatus", "documentation":"

The status of this data source.

" @@ -5335,6 +6545,34 @@ } } }, + "DeleteEnvironmentActionInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which an environment action is deleted.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment where an environment action is deleted.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "identifier":{ + "shape":"String", + "documentation":"

The ID of the environment action that is deleted.

", + "location":"uri", + "locationName":"identifier" + } + } + }, "DeleteEnvironmentBlueprintConfigurationInput":{ "type":"structure", "required":[ @@ -5634,7 +6872,9 @@ }, "subscriptionId":{ "shape":"SubscriptionId", - "documentation":"

The identifier of the subsctiption whose subscription grant is to be deleted.

" + "documentation":"

The identifier of the subscription whose subscription grant is to be deleted.

", + "deprecated":true, + "deprecatedMessage":"Multiple subscriptions can exist for a single grant" }, "subscriptionTargetId":{ "shape":"SubscriptionTargetId", @@ -5848,6 +7088,39 @@ "type":"list", "member":{"shape":"DetailedGlossaryTerm"} }, + "DisassociateEnvironmentRoleInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "environmentRoleArn" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which an environment role is disassociated.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "environmentRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the environment role.

", + "location":"uri", + "locationName":"environmentRoleArn" + } + } + }, + "DisassociateEnvironmentRoleOutput":{ + "type":"structure", + "members":{ + } + }, "DomainDescription":{ "type":"string", "sensitive":true @@ -5925,6 +7198,13 @@ }, "documentation":"

A summary of a Amazon DataZone domain.

" }, + "EdgeDirection":{ + "type":"string", + "enum":[ + "UPSTREAM", + "DOWNSTREAM" + ] + }, "EditedValue":{ "type":"string", "max":5000, @@ -5953,12 +7233,56 @@ }, "EntityType":{ "type":"string", - "enum":["ASSET"] + "enum":[ + "ASSET", + "DATA_PRODUCT" + ] + }, + "EnvironmentActionId":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_-]{1,36}$" }, "EnvironmentActionList":{ "type":"list", "member":{"shape":"ConfigurableEnvironmentAction"} }, + "EnvironmentActionSummary":{ + "type":"structure", + "required":[ + "domainId", + "environmentId", + "id", + "name", + "parameters" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The environment action description.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The Amazon DataZone domain ID of the environment action.

" + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

The environment ID of the environment action.

" + }, + "id":{ + "shape":"EnvironmentActionId", + "documentation":"

The ID of the environment action.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + }, + "documentation":"

The details about the specified action configured for an environment. For example, the details of the specified console links for an analytics tool that is available in this environment.

" + }, "EnvironmentBlueprintConfigurationItem":{ "type":"structure", "required":[ @@ -5986,6 +7310,10 @@ "shape":"RoleArn", "documentation":"

The ARN of the manage access role specified in the environment blueprint configuration.

" }, + "provisioningConfigurations":{ + "shape":"ProvisioningConfigurationList", + "documentation":"

The provisioning configuration of a blueprint.

" + }, "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the provisioning role specified in the environment blueprint configuration.

" @@ -6105,7 +7433,7 @@ }, "EnvironmentProfileId":{ "type":"string", - "pattern":"^[a-zA-Z0-9_-]{1,36}$" + "pattern":"^[a-zA-Z0-9_-]{0,36}$" }, "EnvironmentProfileName":{ "type":"string", @@ -6202,7 +7530,6 @@ "required":[ "createdBy", "domainId", - "environmentProfileId", "name", "projectId", "provider" @@ -6263,6 +7590,24 @@ }, "documentation":"

The details of an environment.

" }, + "EqualToExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be equal to an expression.

" + } + }, + "documentation":"

Specifies whether the value is equal to an expression.

" + }, "ErrorMessage":{"type":"string"}, "ExternalIdentifier":{ "type":"string", @@ -6355,12 +7700,30 @@ "type":"list", "member":{"shape":"FilterExpression"} }, + "FilterId":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_-]{1,36}$" + }, "FilterList":{ "type":"list", "member":{"shape":"FilterClause"}, "max":100, "min":1 }, + "FilterName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w -]+$", + "sensitive":true + }, + "FilterStatus":{ + "type":"string", + "enum":[ + "VALID", + "INVALID" + ] + }, "FilterValueString":{ "type":"string", "max":128, @@ -6444,7 +7807,7 @@ }, "FormInputContentString":{ "type":"string", - "max":150000, + "max":300000, "min":0 }, "FormInputList":{ @@ -6583,6 +7946,90 @@ "max":10, "min":0 }, + "GetAssetFilterInput":{ + "type":"structure", + "required":[ + "assetIdentifier", + "domainIdentifier", + "identifier" + ], + "members":{ + "assetIdentifier":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

", + "location":"uri", + "locationName":"assetIdentifier" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to get an asset filter.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

", + "location":"uri", + "locationName":"identifier" + } + } + }, + "GetAssetFilterOutput":{ + "type":"structure", + "required":[ + "assetId", + "configuration", + "domainId", + "id", + "name" + ], + "members":{ + "assetId":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

" + }, + "configuration":{ + "shape":"AssetFilterConfiguration", + "documentation":"

The configuration of the asset filter.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the asset filter was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to get an asset filter.

" + }, + "effectiveColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

The column names of the asset filter.

" + }, + "effectiveRowFilter":{ + "shape":"String", + "documentation":"

The row filter of the asset filter.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The error message that is displayed if the action does not complete successfully.

" + }, + "id":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

" + }, + "name":{ + "shape":"FilterName", + "documentation":"

The name of the asset filter.

" + }, + "status":{ + "shape":"FilterStatus", + "documentation":"

The status of the asset filter.

" + } + } + }, "GetAssetInput":{ "type":"structure", "required":[ @@ -6784,6 +8231,102 @@ } } }, + "GetDataProductInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data product lives.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product.

", + "location":"uri", + "locationName":"identifier" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data product.

", + "location":"querystring", + "locationName":"revision" + } + } + }, + "GetDataProductOutput":{ + "type":"structure", + "required":[ + "domainId", + "id", + "name", + "owningProjectId", + "revision", + "status" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data product is created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data product.

" + }, + "description":{ + "shape":"DataProductDescription", + "documentation":"

The description of the data product.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data product lives.

" + }, + "firstRevisionCreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the first revision of the data product is created.

" + }, + "firstRevisionCreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the first revision of the data product.

" + }, + "formsOutput":{ + "shape":"FormOutputList", + "documentation":"

The metadata forms of the data product.

" + }, + "glossaryTerms":{ + "shape":"GlossaryTerms", + "documentation":"

The glossary terms of the data product.

" + }, + "id":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product.

" + }, + "items":{ + "shape":"DataProductItems", + "documentation":"

The data assets of the data product.

" + }, + "name":{ + "shape":"DataProductName", + "documentation":"

The name of the data product.

" + }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the data product.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data product.

" + }, + "status":{ + "shape":"DataProductStatus", + "documentation":"

The status of the data product.

" + } + } + }, "GetDataSourceInput":{ "type":"structure", "required":[ @@ -6887,6 +8430,10 @@ "shape":"ScheduleConfiguration", "documentation":"

The schedule of the data source runs.

" }, + "selfGrantStatus":{ + "shape":"SelfGrantStatusOutput", + "documentation":"

Specifies the status of the self-granting functionality.

" + }, "status":{ "shape":"DataSourceStatus", "documentation":"

The status of the data source.

" @@ -7059,6 +8606,70 @@ } } }, + "GetEnvironmentActionInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the GetEnvironmentAction API is invoked.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The environment ID of the environment action.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "identifier":{ + "shape":"String", + "documentation":"

The ID of the environment action.

", + "location":"uri", + "locationName":"identifier" + } + } + }, + "GetEnvironmentActionOutput":{ + "type":"structure", + "required":[ + "domainId", + "environmentId", + "id", + "name", + "parameters" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The description of the environment action.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the environment action lives.

" + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

The environment ID of the environment action.

" + }, + "id":{ + "shape":"EnvironmentActionId", + "documentation":"

The ID of the environment action.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + } + }, "GetEnvironmentBlueprintConfigurationInput":{ "type":"structure", "required":[ @@ -7107,6 +8718,10 @@ "shape":"RoleArn", "documentation":"

The ARN of the manage access role with which this blueprint is created.

" }, + "provisioningConfigurations":{ + "shape":"ProvisioningConfigurationList", + "documentation":"

The provisioning configuration of a blueprint.

" + }, "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the provisioning role with which this blueprint is created.

" @@ -7193,6 +8808,49 @@ } } }, + "GetEnvironmentCredentialsInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which this environment and its credentials exist.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment whose credentials this operation gets.

", + "location":"uri", + "locationName":"environmentIdentifier" + } + } + }, + "GetEnvironmentCredentialsOutput":{ + "type":"structure", + "members":{ + "accessKeyId":{ + "shape":"String", + "documentation":"

The access key ID of the environment.

" + }, + "expiration":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The expiration timestamp of the environment credentials.

" + }, + "secretAccessKey":{ + "shape":"String", + "documentation":"

The secret access key of the environment credentials.

" + }, + "sessionToken":{ + "shape":"String", + "documentation":"

The session token of the environment credentials.

" + } + }, + "sensitive":true + }, "GetEnvironmentInput":{ "type":"structure", "required":[ @@ -7219,7 +8877,6 @@ "required":[ "createdBy", "domainId", - "environmentProfileId", "name", "projectId", "provider" @@ -7690,13 +9347,110 @@ "type":"structure", "required":["userProfileId"], "members":{ - "authCodeUrl":{ + "authCodeUrl":{ + "shape":"String", + "documentation":"

The data portal URL of the specified Amazon DataZone domain.

" + }, + "userProfileId":{ + "shape":"String", + "documentation":"

The ID of the user profile.

" + } + } + }, + "GetLineageNodeInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain in which you want to get the data lineage node.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"

The event time stamp for which you want to get the data lineage node.

", + "location":"querystring", + "locationName":"timestamp" + }, + "identifier":{ + "shape":"LineageNodeIdentifier", + "documentation":"

The ID of the data lineage node that you want to get.

Both, a lineage node identifier generated by Amazon DataZone and a sourceIdentifier of the lineage node are supported. If sourceIdentifier is greater than 1800 characters, you can use lineage node identifier generated by Amazon DataZone to get the node details.

", + "location":"uri", + "locationName":"identifier" + } + } + }, + "GetLineageNodeOutput":{ + "type":"structure", + "required":[ + "domainId", + "id", + "typeName" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data lineage node was created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data lineage node.

" + }, + "description":{ "shape":"String", - "documentation":"

The data portal URL of the specified Amazon DataZone domain.

" + "documentation":"

The description of the data lineage node.

" }, - "userProfileId":{ + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you're getting the data lineage node.

" + }, + "downstreamNodes":{ + "shape":"LineageNodeReferenceList", + "documentation":"

The downstream nodes of the specified data lineage node.

" + }, + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of the event described in the data lineage node.

" + }, + "formsOutput":{ + "shape":"FormOutputList", + "documentation":"

The metadata of the specified data lineage node.

" + }, + "id":{ + "shape":"LineageNodeId", + "documentation":"

The ID of the data lineage node.

" + }, + "name":{ "shape":"String", - "documentation":"

The ID of the user profile.

" + "documentation":"

The name of the data lineage node.

" + }, + "sourceIdentifier":{ + "shape":"String", + "documentation":"

The source identifier of the data lineage node.

" + }, + "typeName":{ + "shape":"String", + "documentation":"

The name of the type of the specified data lineage node.

" + }, + "typeRevision":{ + "shape":"Revision", + "documentation":"

The revision type of the specified data lineage node.

" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"

The timestamp at which the data lineage node was updated.

" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"

The user who updated the data lineage node.

" + }, + "upstreamNodes":{ + "shape":"LineageNodeReferenceList", + "documentation":"

The upstream nodes of the specified data lineage node.

" } } }, @@ -7980,7 +9734,9 @@ }, "subscriptionId":{ "shape":"SubscriptionId", - "documentation":"

The identifier of the subscription.

" + "documentation":"

The identifier of the subscription.

", + "deprecated":true, + "deprecatedMessage":"Multiple subscriptions can exist for a single grant" }, "subscriptionTargetId":{ "shape":"SubscriptionTargetId", @@ -8623,6 +10379,17 @@ "min":4, "pattern":"[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]" }, + "GlueSelfGrantStatusOutput":{ + "type":"structure", + "required":["selfGrantStatusDetails"], + "members":{ + "selfGrantStatusDetails":{ + "shape":"SelfGrantStatusDetails", + "documentation":"

The details for the self granting status for a Glue data source.

" + } + }, + "documentation":"

The details of the self granting status.

" + }, "GrantedEntity":{ "type":"structure", "members":{ @@ -8645,6 +10412,42 @@ "documentation":"

The details of a listing for which a subscription is to be granted.

", "union":true }, + "GreaterThanExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be greater than an expression.

" + } + }, + "documentation":"

Specifies whether the value is greater than an expression.

" + }, + "GreaterThanOrEqualToExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be greater than or equal to an expression.

" + } + }, + "documentation":"

Specifies whether the value is greater than or equal to an expression.

" + }, "GroupDetails":{ "type":"structure", "required":["groupId"], @@ -8751,6 +10554,24 @@ "max":10, "min":1 }, + "InExpression":{ + "type":"structure", + "required":[ + "columnName", + "values" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "values":{ + "shape":"StringList", + "documentation":"

The values that might be in the expression.

" + } + }, + "documentation":"

Specifies whether values are in the expression.

" + }, "Integer":{ "type":"integer", "box":true @@ -8772,18 +10593,313 @@ "enum":[ "ASSET", "GLOSSARY", - "GLOSSARY_TERM" + "GLOSSARY_TERM", + "DATA_PRODUCT" ] }, + "IsNotNullExpression":{ + "type":"structure", + "required":["columnName"], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + } + }, + "documentation":"

Specifies that the expression is not null.

" + }, + "IsNullExpression":{ + "type":"structure", + "required":["columnName"], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + } + }, + "documentation":"

Specifies that the expression is null.

" + }, + "ItemGlossaryTerms":{ + "type":"list", + "member":{"shape":"GlossaryTermId"}, + "max":2, + "min":1 + }, "KmsKeyArn":{ "type":"string", "max":1024, "min":1, "pattern":"^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" }, - "LastName":{ - "type":"string", - "sensitive":true + "LakeFormationConfiguration":{ + "type":"structure", + "members":{ + "locationRegistrationExcludeS3Locations":{ + "shape":"S3LocationList", + "documentation":"

Specifies certain Amazon S3 locations if you do not want Amazon DataZone to automatically register them in hybrid mode.

" + }, + "locationRegistrationRole":{ + "shape":"RoleArn", + "documentation":"

The role that is used to manage read/write access to the chosen Amazon S3 bucket(s) for Data Lake using AWS Lake Formation hybrid access mode.

" + } + }, + "documentation":"

The Lake Formation configuration of the Data Lake blueprint.

" + }, + "LastName":{ + "type":"string", + "sensitive":true + }, + "LessThanExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be less than the expression.

" + } + }, + "documentation":"

Specifies that a value is less than an expression.

" + }, + "LessThanOrEqualToExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be less than or equal to an expression.

" + } + }, + "documentation":"

Specifies that a value is less than or equal to an expression.

" + }, + "LikeExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might be like the expression.

" + } + }, + "documentation":"

Specifies that a value is like the expression.

" + }, + "LineageEvent":{ + "type":"blob", + "max":300000, + "min":0, + "sensitive":true + }, + "LineageNodeId":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_-]{1,36}$" + }, + "LineageNodeIdentifier":{ + "type":"string", + "max":2086, + "min":1 + }, + "LineageNodeReference":{ + "type":"structure", + "members":{ + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"

The event timestamp of the data lineage node.

" + }, + "id":{ + "shape":"LineageNodeId", + "documentation":"

The ID of the data lineage node.

" + } + }, + "documentation":"

The reference details for the data lineage node.

" + }, + "LineageNodeReferenceList":{ + "type":"list", + "member":{"shape":"LineageNodeReference"}, + "max":100, + "min":0 + }, + "LineageNodeSummaries":{ + "type":"list", + "member":{"shape":"LineageNodeSummary"} + }, + "LineageNodeSummary":{ + "type":"structure", + "required":[ + "domainId", + "id", + "typeName" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data lineage node was created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data lineage node.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the data lineage node.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain of the data lineage node.

" + }, + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"

The event timestamp of the data lineage node.

" + }, + "id":{ + "shape":"LineageNodeId", + "documentation":"

The ID of the data lineage node.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the data lineage node.

" + }, + "sourceIdentifier":{ + "shape":"String", + "documentation":"

The alternate ID of the data lineage node.

" + }, + "typeName":{ + "shape":"String", + "documentation":"

The name of the type of the data lineage node.

" + }, + "typeRevision":{ + "shape":"Revision", + "documentation":"

The type of the revision of the data lineage node.

" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"

The timestamp at which the data lineage node was updated.

" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"

The user who updated the data lineage node.

" + } + }, + "documentation":"

The summary of the data lineage node.

" + }, + "LineageNodeTypeItem":{ + "type":"structure", + "required":[ + "domainId", + "formsOutput", + "revision" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data lineage node type was created.

" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

The user who created the data lineage node type.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the data lineage node type.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the data lineage node type lives.

" + }, + "formsOutput":{ + "shape":"FormsOutputMap", + "documentation":"

The forms output of the data lineage node type.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the data lineage node type.

" + }, + "revision":{ + "shape":"Revision", + "documentation":"

The revision of the data lineage node type.

" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"

The timestamp at which the data lineage node type was updated.

" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"

The user who updated the data lineage node type.

" + } + }, + "documentation":"

The details of a data lineage node type.

" + }, + "ListAssetFiltersInput":{ + "type":"structure", + "required":[ + "assetIdentifier", + "domainIdentifier" + ], + "members":{ + "assetIdentifier":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

", + "location":"uri", + "locationName":"assetIdentifier" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to list asset filters.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of asset filters to return in a single call to ListAssetFilters. When the number of asset filters to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListAssetFilters to list the next set of asset filters.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of asset filters is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of asset filters, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetFilters to list the next set of asset filters.

", + "location":"querystring", + "locationName":"nextToken" + }, + "status":{ + "shape":"FilterStatus", + "documentation":"

The status of the asset filter.

", + "location":"querystring", + "locationName":"status" + } + } + }, + "ListAssetFiltersOutput":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"AssetFilters", + "documentation":"

The results of the ListAssetFilters action.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of asset filters is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of asset filters, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetFilters to list the next set of asset filters.

" + } + } }, "ListAssetRevisionsInput":{ "type":"structure", @@ -8831,6 +10947,53 @@ } } }, + "ListDataProductRevisionsInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain of the data product revisions that you want to list.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"DataProductId", + "documentation":"

The ID of the data product revision.

", + "location":"uri", + "locationName":"identifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of asset filters to return in a single call to ListDataProductRevisions. When the number of data product revisions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDataProductRevisions to list the next set of data product revisions.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of data product revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data product revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataProductRevisions to list the next set of data product revisions.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListDataProductRevisionsOutput":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"DataProductRevisions", + "documentation":"

The results of the ListDataProductRevisions action.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of data product revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data product revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataProductRevisions to list the next set of data product revisions.

" + } + } + }, "ListDataSourceRunActivitiesInput":{ "type":"structure", "required":[ @@ -9045,6 +11208,56 @@ } } }, + "ListEnvironmentActionSummaries":{ + "type":"list", + "member":{"shape":"EnvironmentActionSummary"} + }, + "ListEnvironmentActionsInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier" + ], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the Amazon DataZone domain in which the environment actions are listed.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The ID of the environment whose environment actions are listed.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of environment actions to return in a single call to ListEnvironmentActions. When the number of environment actions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironmentActions to list the next set of environment actions.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of environment actions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment actions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentActions to list the next set of environment actions.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListEnvironmentActionsOutput":{ + "type":"structure", + "members":{ + "items":{ + "shape":"ListEnvironmentActionSummaries", + "documentation":"

The results of ListEnvironmentActions.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of environment actions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment actions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentActions to list the next set of environment actions.

" + } + } + }, "ListEnvironmentBlueprintConfigurationsInput":{ "type":"structure", "required":["domainIdentifier"], @@ -9289,6 +11502,76 @@ } } }, + "ListLineageNodeHistoryInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "identifier" + ], + "members":{ + "direction":{ + "shape":"EdgeDirection", + "documentation":"

The direction of the data lineage node refers to the lineage node having neighbors in that direction. For example, if direction is UPSTREAM, the ListLineageNodeHistory API responds with historical versions with upstream neighbors only.

", + "location":"querystring", + "locationName":"direction" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to list the history of the specified data lineage node.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "eventTimestampGTE":{ + "shape":"Timestamp", + "documentation":"

Specifies whether the action is to return data lineage node history from the time after the event timestamp.

", + "location":"querystring", + "locationName":"timestampGTE" + }, + "eventTimestampLTE":{ + "shape":"Timestamp", + "documentation":"

Specifies whether the action is to return data lineage node history from the time prior of the event timestamp.

", + "location":"querystring", + "locationName":"timestampLTE" + }, + "identifier":{ + "shape":"LineageNodeIdentifier", + "documentation":"

The ID of the data lineage node whose history you want to list.

", + "location":"uri", + "locationName":"identifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of history items to return in a single call to ListLineageNodeHistory. When the number of history items to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListLineageNodeHistory to list the next set of items.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of history items is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of items, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the next set of items.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The order by which you want data lineage node history to be sorted.

", + "location":"querystring", + "locationName":"sortOrder" + } + } + }, + "ListLineageNodeHistoryOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When the number of history items is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of items, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the next set of items.

" + }, + "nodes":{ + "shape":"LineageNodeSummaries", + "documentation":"

The nodes returned by the ListLineageNodeHistory action.

" + } + } + }, "ListMetadataGenerationRunsInput":{ "type":"structure", "required":["domainIdentifier"], @@ -9550,6 +11833,12 @@ "location":"querystring", "locationName":"nextToken" }, + "owningProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the owning project of the subscription grants.

", + "location":"querystring", + "locationName":"owningProjectId" + }, "sortBy":{ "shape":"SortKey", "documentation":"

Specifies the way of sorting the results of this action.

", @@ -9644,7 +11933,7 @@ }, "status":{ "shape":"SubscriptionRequestStatus", - "documentation":"

Specifies the status of the subscription requests.

", + "documentation":"

Specifies the status of the subscription requests.

This is not a required parameter, but if not specified, by default, Amazon DataZone returns only PENDING subscription requests.

", "location":"querystring", "locationName":"status" }, @@ -9777,7 +12066,7 @@ }, "status":{ "shape":"SubscriptionStatus", - "documentation":"

The status of the subscriptions that you want to list.

", + "documentation":"

The status of the subscriptions that you want to list.

This is not a required parameter, but if not provided, by default, Amazon DataZone returns only APPROVED subscriptions.

", "location":"querystring", "locationName":"status" }, @@ -9912,6 +12201,10 @@ "assetListing":{ "shape":"AssetListing", "documentation":"

An asset published in an Amazon DataZone catalog.

" + }, + "dataProductListing":{ + "shape":"DataProductListing", + "documentation":"

The data product listing.

" } }, "documentation":"

The details of a listing (aka asset published in a Amazon DataZone catalog).

", @@ -9966,6 +12259,50 @@ "INACTIVE" ] }, + "ListingSummaries":{ + "type":"list", + "member":{"shape":"ListingSummary"} + }, + "ListingSummary":{ + "type":"structure", + "members":{ + "glossaryTerms":{ + "shape":"DetailedGlossaryTerms", + "documentation":"

The glossary terms of the data product.

" + }, + "listingId":{ + "shape":"ListingId", + "documentation":"

The ID of the data product listing.

" + }, + "listingRevision":{ + "shape":"Revision", + "documentation":"

The revision of the data product listing.

" + } + }, + "documentation":"

The summary of the listing of the data product.

" + }, + "ListingSummaryItem":{ + "type":"structure", + "members":{ + "glossaryTerms":{ + "shape":"DetailedGlossaryTerms", + "documentation":"

The glossary terms of the data product listing.

" + }, + "listingId":{ + "shape":"ListingId", + "documentation":"

The ID of the data product listing.

" + }, + "listingRevision":{ + "shape":"Revision", + "documentation":"

The revision of the data product listing.

" + } + }, + "documentation":"

The results of the data product summary.

" + }, + "ListingSummaryItems":{ + "type":"list", + "member":{"shape":"ListingSummaryItem"} + }, "LongDescription":{ "type":"string", "max":4096, @@ -10134,6 +12471,60 @@ "min":1, "sensitive":true }, + "NotEqualToExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might not be equal to the expression.

" + } + }, + "documentation":"

Specifies that a value is not equal to the expression.

" + }, + "NotInExpression":{ + "type":"structure", + "required":[ + "columnName", + "values" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "values":{ + "shape":"StringList", + "documentation":"

The values that might not be in the expression.

" + } + }, + "documentation":"

Specifies that a value is not in the expression.

" + }, + "NotLikeExpression":{ + "type":"structure", + "required":[ + "columnName", + "value" + ], + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that might not be like the expression.

" + } + }, + "documentation":"

Specifies that a value might not be like the expression.

" + }, "NotificationOutput":{ "type":"structure", "required":[ @@ -10251,6 +12642,38 @@ "max":8192, "min":1 }, + "PostLineageEventInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "event" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to post a data lineage event.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "event":{ + "shape":"LineageEvent", + "documentation":"

The data lineage event that you want to post. Only open-lineage run events are supported as events.

" + } + }, + "payload":"event" + }, + "PostLineageEventOutput":{ + "type":"structure", + "members":{ + } + }, "PostTimeSeriesDataPointsInput":{ "type":"structure", "required":[ @@ -10431,6 +12854,21 @@ }, "documentation":"

The details of a Amazon DataZone project.

" }, + "ProvisioningConfiguration":{ + "type":"structure", + "members":{ + "lakeFormationConfiguration":{ + "shape":"LakeFormationConfiguration", + "documentation":"

The Lake Formation configuration of the Data Lake blueprint.

" + } + }, + "documentation":"

The provisioning configuration of the blueprint.

", + "union":true + }, + "ProvisioningConfigurationList":{ + "type":"list", + "member":{"shape":"ProvisioningConfiguration"} + }, "ProvisioningProperties":{ "type":"structure", "members":{ @@ -10470,6 +12908,10 @@ "shape":"RoleArn", "documentation":"

The ARN of the manage access role.

" }, + "provisioningConfigurations":{ + "shape":"ProvisioningConfigurationList", + "documentation":"

The provisioning configuration of a blueprint.

" + }, "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the provisioning role.

" @@ -10507,6 +12949,10 @@ "shape":"RoleArn", "documentation":"

The ARN of the manage access role.

" }, + "provisioningConfigurations":{ + "shape":"ProvisioningConfigurationList", + "documentation":"

The provisioning configuration of a blueprint.

" + }, "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the provisioning role.

" @@ -10635,6 +13081,17 @@ "min":4, "pattern":"[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]" }, + "RedshiftSelfGrantStatusOutput":{ + "type":"structure", + "required":["selfGrantStatusDetails"], + "members":{ + "selfGrantStatusDetails":{ + "shape":"SelfGrantStatusDetails", + "documentation":"

The details for the self granting status for an Amazon Redshift data source.

" + } + }, + "documentation":"

The details for the self granting status for an Amazon Redshift data source.

" + }, "RedshiftServerlessStorage":{ "type":"structure", "required":["workgroupName"], @@ -11061,6 +13518,99 @@ "type":"string", "pattern":"^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" }, + "RowFilter":{ + "type":"structure", + "members":{ + "and":{ + "shape":"RowFilterList", + "documentation":"

The 'and' clause of the row filter.

" + }, + "expression":{ + "shape":"RowFilterExpression", + "documentation":"

The expression of the row filter.

" + }, + "or":{ + "shape":"RowFilterList", + "documentation":"

The 'or' clause of the row filter.

" + } + }, + "documentation":"

The row filter.

", + "union":true + }, + "RowFilterConfiguration":{ + "type":"structure", + "required":["rowFilter"], + "members":{ + "rowFilter":{ + "shape":"RowFilter", + "documentation":"

The row filter.

" + }, + "sensitive":{ + "shape":"Boolean", + "documentation":"

Specifies whether the row filter is sensitive.

" + } + }, + "documentation":"

The row filter configuration details.

" + }, + "RowFilterExpression":{ + "type":"structure", + "members":{ + "equalTo":{ + "shape":"EqualToExpression", + "documentation":"

The 'equal to' clause of the row filter expression.

" + }, + "greaterThan":{ + "shape":"GreaterThanExpression", + "documentation":"

The 'greater than' clause of the row filter expression.

" + }, + "greaterThanOrEqualTo":{ + "shape":"GreaterThanOrEqualToExpression", + "documentation":"

The 'greater than or equal to' clause of the filter expression.

" + }, + "in":{ + "shape":"InExpression", + "documentation":"

The 'in' clause of the row filter expression.

" + }, + "isNotNull":{ + "shape":"IsNotNullExpression", + "documentation":"

The 'is not null' clause of the row filter expression.

" + }, + "isNull":{ + "shape":"IsNullExpression", + "documentation":"

The 'is null' clause of the row filter expression.

" + }, + "lessThan":{ + "shape":"LessThanExpression", + "documentation":"

The 'less than' clause of the row filter expression.

" + }, + "lessThanOrEqualTo":{ + "shape":"LessThanOrEqualToExpression", + "documentation":"

The 'less than or equal to' clause of the row filter expression.

" + }, + "like":{ + "shape":"LikeExpression", + "documentation":"

The 'like' clause of the row filter expression.

" + }, + "notEqualTo":{ + "shape":"NotEqualToExpression", + "documentation":"

The 'not equal to' clause of the row filter expression.

" + }, + "notIn":{ + "shape":"NotInExpression", + "documentation":"

The 'not in' clause of the row filter expression.

" + }, + "notLike":{ + "shape":"NotLikeExpression", + "documentation":"

The 'not like' clause of the row filter expression.

" + } + }, + "documentation":"

The row filter expression.

", + "union":true + }, + "RowFilterList":{ + "type":"list", + "member":{"shape":"RowFilter"} + }, "RunStatisticsForAssets":{ "type":"structure", "members":{ @@ -11087,6 +13637,18 @@ }, "documentation":"

The asset statistics from the data source run.

" }, + "S3Location":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^s3://.+$" + }, + "S3LocationList":{ + "type":"list", + "member":{"shape":"S3Location"}, + "max":20, + "min":0 + }, "ScheduleConfiguration":{ "type":"structure", "members":{ @@ -11222,8 +13784,8 @@ "documentation":"

The asset item included in the search results.

" }, "dataProductItem":{ - "shape":"DataProductSummary", - "documentation":"

The data product item included in the search results.

" + "shape":"DataProductResultItem", + "documentation":"

The data product.

" }, "glossaryItem":{ "shape":"GlossaryItem", @@ -11332,6 +13894,10 @@ "assetListing":{ "shape":"AssetListingItem", "documentation":"

The asset listing included in the results of the SearchListings action.

" + }, + "dataProductListing":{ + "shape":"DataProductListingItem", + "documentation":"

The data product listing.

" } }, "documentation":"

The details of the results of the SearchListings action.

", @@ -11436,6 +14002,10 @@ "formTypeItem":{ "shape":"FormTypeData", "documentation":"

The form type included in the results of the SearchTypes action.

" + }, + "lineageNodeTypeItem":{ + "shape":"LineageNodeTypeItem", + "documentation":"

The details of a data lineage node type.

" } }, "documentation":"

The details of the results of the SearchTypes action.

", @@ -11489,6 +14059,73 @@ } } }, + "SelfGrantStatus":{ + "type":"string", + "enum":[ + "GRANT_PENDING", + "REVOKE_PENDING", + "GRANT_IN_PROGRESS", + "REVOKE_IN_PROGRESS", + "GRANTED", + "GRANT_FAILED", + "REVOKE_FAILED" + ] + }, + "SelfGrantStatusDetail":{ + "type":"structure", + "required":[ + "databaseName", + "status" + ], + "members":{ + "databaseName":{ + "shape":"SelfGrantStatusDetailDatabaseNameString", + "documentation":"

The name of the database used for the data source.

" + }, + "failureCause":{ + "shape":"String", + "documentation":"

The reason for why the operation failed.

" + }, + "schemaName":{ + "shape":"SelfGrantStatusDetailSchemaNameString", + "documentation":"

The name of the schema used in the data source.

" + }, + "status":{ + "shape":"SelfGrantStatus", + "documentation":"

The self granting status of the data source.

" + } + }, + "documentation":"

The details for the self granting status.

" + }, + "SelfGrantStatusDetailDatabaseNameString":{ + "type":"string", + "max":128, + "min":1 + }, + "SelfGrantStatusDetailSchemaNameString":{ + "type":"string", + "max":128, + "min":1 + }, + "SelfGrantStatusDetails":{ + "type":"list", + "member":{"shape":"SelfGrantStatusDetail"} + }, + "SelfGrantStatusOutput":{ + "type":"structure", + "members":{ + "glueSelfGrantStatus":{ + "shape":"GlueSelfGrantStatusOutput", + "documentation":"

The details for the self granting status for a Glue data source.

" + }, + "redshiftSelfGrantStatus":{ + "shape":"RedshiftSelfGrantStatusOutput", + "documentation":"

The details for the self granting status for an Amazon Redshift data source.

" + } + }, + "documentation":"

The details for the self granting status for a data source.

", + "union":true + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["message"], @@ -11524,7 +14161,7 @@ }, "Smithy":{ "type":"string", - "max":10000, + "max":100000, "min":1 }, "SortFieldProject":{ @@ -11728,6 +14365,10 @@ } }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "SubscribedAsset":{ "type":"structure", "required":[ @@ -11861,6 +14502,10 @@ "assetListing":{ "shape":"SubscribedAssetListing", "documentation":"

The asset for which the subscription grant is created.

" + }, + "productListing":{ + "shape":"SubscribedProductListing", + "documentation":"

The data product listing.

" } }, "documentation":"

The published asset for which the subscription grant is created.

", @@ -11894,6 +14539,36 @@ "max":1, "min":1 }, + "SubscribedProductListing":{ + "type":"structure", + "members":{ + "assetListings":{ + "shape":"AssetInDataProductListingItems", + "documentation":"

The data assets of the data product listing.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the data product listing.

" + }, + "entityId":{ + "shape":"AssetId", + "documentation":"

The ID of the data product listing.

" + }, + "entityRevision":{ + "shape":"Revision", + "documentation":"

The revision of the data product listing.

" + }, + "glossaryTerms":{ + "shape":"DetailedGlossaryTerms", + "documentation":"

The glossary terms of the data product listing.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the data product listing.

" + } + }, + "documentation":"

The data product listing.

" + }, "SubscribedProject":{ "type":"structure", "members":{ @@ -11990,7 +14665,9 @@ }, "subscriptionId":{ "shape":"SubscriptionId", - "documentation":"

The ID of the subscription grant.

" + "documentation":"

The ID of the subscription.

", + "deprecated":true, + "deprecatedMessage":"Multiple subscriptions can exist for a single grant" }, "subscriptionTargetId":{ "shape":"SubscriptionTargetId", @@ -12647,7 +15324,8 @@ "type":"string", "enum":[ "ASSET_TYPE", - "FORM_TYPE" + "FORM_TYPE", + "LINEAGE_NODE_TYPE" ] }, "UnauthorizedException":{ @@ -12689,6 +15367,102 @@ "members":{ } }, + "UpdateAssetFilterInput":{ + "type":"structure", + "required":[ + "assetIdentifier", + "domainIdentifier", + "identifier" + ], + "members":{ + "assetIdentifier":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

", + "location":"uri", + "locationName":"assetIdentifier" + }, + "configuration":{ + "shape":"AssetFilterConfiguration", + "documentation":"

The configuration of the asset filter.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to update an asset filter.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "identifier":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

", + "location":"uri", + "locationName":"identifier" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the asset filter.

" + } + } + }, + "UpdateAssetFilterOutput":{ + "type":"structure", + "required":[ + "assetId", + "configuration", + "domainId", + "id", + "name" + ], + "members":{ + "assetId":{ + "shape":"AssetId", + "documentation":"

The ID of the data asset.

" + }, + "configuration":{ + "shape":"AssetFilterConfiguration", + "documentation":"

The configuration of the asset filter.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the asset filter was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the asset filter.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where the asset filter was created.

" + }, + "effectiveColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

The column names of the asset filter.

" + }, + "effectiveRowFilter":{ + "shape":"String", + "documentation":"

The row filter of the asset filter.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The error message that is displayed if the action is not completed successfully.

" + }, + "id":{ + "shape":"FilterId", + "documentation":"

The ID of the asset filter.

" + }, + "name":{ + "shape":"FilterName", + "documentation":"

The name of the asset filter.

" + }, + "status":{ + "shape":"FilterStatus", + "documentation":"

The status of the asset filter.

" + } + } + }, "UpdateDataSourceInput":{ "type":"structure", "required":[ @@ -12736,6 +15510,10 @@ "shape":"RecommendationConfiguration", "documentation":"

The recommendation to be updated as part of the UpdateDataSource action.

" }, + "retainPermissionsOnRevokeFailure":{ + "shape":"Boolean", + "documentation":"

Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source.

" + }, "schedule":{ "shape":"ScheduleConfiguration", "documentation":"

The schedule to be updated as part of the UpdateDataSource action.

" @@ -12816,10 +15594,18 @@ "shape":"RecommendationConfiguration", "documentation":"

The recommendation to be updated as part of the UpdateDataSource action.

" }, + "retainPermissionsOnRevokeFailure":{ + "shape":"Boolean", + "documentation":"

Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source.

" + }, "schedule":{ "shape":"ScheduleConfiguration", "documentation":"

The schedule to be updated as part of the UpdateDataSource action.

" }, + "selfGrantStatus":{ + "shape":"SelfGrantStatusOutput", + "documentation":"

Specifies the status of the self-granting functionality.

" + }, "status":{ "shape":"DataSourceStatus", "documentation":"

The status to be updated as part of the UpdateDataSource action.

" @@ -12899,6 +15685,82 @@ } } }, + "UpdateEnvironmentActionInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "environmentIdentifier", + "identifier" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The description of the environment action.

" + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The domain ID of the environment action.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "environmentIdentifier":{ + "shape":"EnvironmentId", + "documentation":"

The environment ID of the environment action.

", + "location":"uri", + "locationName":"environmentIdentifier" + }, + "identifier":{ + "shape":"String", + "documentation":"

The ID of the environment action.

", + "location":"uri", + "locationName":"identifier" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + } + }, + "UpdateEnvironmentActionOutput":{ + "type":"structure", + "required":[ + "domainId", + "environmentId", + "id", + "name", + "parameters" + ], + "members":{ + "description":{ + "shape":"String", + "documentation":"

The description of the environment action.

" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The domain ID of the environment action.

" + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

The environment ID of the environment action.

" + }, + "id":{ + "shape":"EnvironmentActionId", + "documentation":"

The ID of the environment action.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the environment action.

" + }, + "parameters":{ + "shape":"ActionParameters", + "documentation":"

The parameters of the environment action.

" + } + } + }, "UpdateEnvironmentInput":{ "type":"structure", "required":[ @@ -12937,7 +15799,6 @@ "required":[ "createdBy", "domainId", - "environmentProfileId", "name", "projectId", "provider" @@ -13507,7 +16368,9 @@ }, "subscriptionId":{ "shape":"SubscriptionId", - "documentation":"

The identifier of the subscription.

" + "documentation":"

The identifier of the subscription.

", + "deprecated":true, + "deprecatedMessage":"Multiple subscriptions can exist for a single grant" }, "subscriptionTargetId":{ "shape":"SubscriptionTargetId", diff --git a/botocore/data/devicefarm/2015-06-23/endpoint-rule-set-1.json b/botocore/data/devicefarm/2015-06-23/endpoint-rule-set-1.json index 56d866263f..d2d71d3fcb 100644 --- a/botocore/data/devicefarm/2015-06-23/endpoint-rule-set-1.json +++ b/botocore/data/devicefarm/2015-06-23/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not 
support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json b/botocore/data/devicefarm/2015-06-23/service-2.json index 14a5d1e4cb..fda5cb6fda 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"devicefarm", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Device Farm", "serviceId":"Device Farm", "signatureVersion":"v4", "targetPrefix":"DeviceFarm_20150623", - "uid":"devicefarm-2015-06-23" + "uid":"devicefarm-2015-06-23", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateDevicePool":{ diff --git a/botocore/data/directconnect/2012-10-25/endpoint-rule-set-1.json b/botocore/data/directconnect/2012-10-25/endpoint-rule-set-1.json index 72a011aafa..c255b39924 100644 --- a/botocore/data/directconnect/2012-10-25/endpoint-rule-set-1.json +++ b/botocore/data/directconnect/2012-10-25/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": 
"booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index 7de7943fee..21398acdd2 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ b/botocore/data/directconnect/2012-10-25/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"directconnect", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Direct Connect", "serviceId":"Direct Connect", "signatureVersion":"v4", "targetPrefix":"OvertureService", - "uid":"directconnect-2012-10-25" + "uid":"directconnect-2012-10-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptDirectConnectGatewayAssociationProposal":{ @@ -38,7 +40,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use AllocateHostedConnection instead.

Creates a hosted connection on an interconnect.

Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect.

Intended for use by Direct Connect Partners only.

", + "documentation":"

Deprecated. Use AllocateHostedConnection instead.

Creates a hosted connection on an interconnect.

Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect.

Intended for use by Direct Connect Partners only.

", "deprecated":true }, "AllocateHostedConnection":{ @@ -333,7 +335,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a link aggregation group (LAG) with the specified number of bundled physical dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface.

All connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must terminate at the same Direct Connect endpoint.

You can have up to 10 dedicated connections per LAG. Regardless of this limit, if you request more connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is created.

You can specify an existing physical dedicated connection or interconnect to include in the LAG (which counts towards the total number of connections). Doing so interrupts the current physical dedicated connection, and re-establishes them as a member of the LAG. The LAG will be created on the same Direct Connect endpoint to which the dedicated connection terminates. Any virtual interfaces associated with the dedicated connection are automatically disassociated and re-associated with the LAG. The connection ID does not change.

If the Amazon Web Services account used to create a LAG is a registered Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG owned by a partner, any associated virtual interfaces cannot be directly configured.

" + "documentation":"

Creates a link aggregation group (LAG) with the specified number of bundled physical dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface.

All connections in a LAG must use the same bandwidth (either 1Gbps, 10Gbps, 100Gbps, or 400Gbps) and must terminate at the same Direct Connect endpoint.

You can have up to 10 dedicated connections per location. Regardless of this limit, if you request more connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is created.

You can specify an existing physical dedicated connection or interconnect to include in the LAG (which counts towards the total number of connections). Doing so interrupts the current physical dedicated connection, and re-establishes them as a member of the LAG. The LAG will be created on the same Direct Connect endpoint to which the dedicated connection terminates. Any virtual interfaces associated with the dedicated connection are automatically disassociated and re-associated with the LAG. The connection ID does not change.

If the Amazon Web Services account used to create a LAG is a registered Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG owned by a partner, any associated virtual interfaces cannot be directly configured.

" }, "CreatePrivateVirtualInterface":{ "name":"CreatePrivateVirtualInterface", @@ -349,7 +351,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" + "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "CreatePublicVirtualInterface":{ "name":"CreatePublicVirtualInterface", @@ -507,7 +509,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", "deprecated":true }, "DescribeConnections":{ @@ -536,7 +538,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeHostedConnections instead.

Lists the connections that have been provisioned on the specified interconnect.

Intended for use by Direct Connect Partners only.

", + "documentation":"

Deprecated. Use DescribeHostedConnections instead.

Lists the connections that have been provisioned on the specified interconnect.

Intended for use by Direct Connect Partners only.

", "deprecated":true }, "DescribeCustomerMetadata":{ @@ -634,7 +636,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", "deprecated":true }, "DescribeInterconnects":{ @@ -731,7 +733,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the virtual private gateways owned by the Amazon Web Services account.

You can create one or more Direct Connect private virtual interfaces linked to a virtual private gateway.

" + "documentation":"

Deprecated. Use DescribeVpnGateways instead. See DescribeVPNGateways in the Amazon Elastic Compute Cloud API Reference.

Lists the virtual private gateways owned by the Amazon Web Services account.

You can create one or more Direct Connect private virtual interfaces linked to a virtual private gateway.

" }, "DescribeVirtualInterfaces":{ "name":"DescribeVirtualInterfaces", @@ -915,7 +917,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" + "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" } }, "shapes":{ @@ -949,7 +951,10 @@ "AcceptDirectConnectGatewayAssociationProposalResult":{ "type":"structure", "members":{ - "directConnectGatewayAssociation":{"shape":"DirectConnectGatewayAssociation"} + "directConnectGatewayAssociation":{ + "shape":"DirectConnectGatewayAssociation", + "documentation":"

Information about an association between a Direct Connect gateway and a virtual gateway or transit gateway.

" + } } }, "AddressFamily":{ @@ -1019,7 +1024,7 @@ }, "bandwidth":{ "shape":"Bandwidth", - "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or 10Gbps hosted connection.

" + "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Note that only those Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps, 10Gbps, or 25Gbps hosted connection.

" }, "connectionName":{ "shape":"ConnectionName", @@ -1104,7 +1109,10 @@ "AllocateTransitVirtualInterfaceResult":{ "type":"structure", "members":{ - "virtualInterface":{"shape":"VirtualInterface"} + "virtualInterface":{ + "shape":"VirtualInterface", + "documentation":"

Information about the transit virtual interface.

" + } } }, "AmazonAddress":{"type":"string"}, @@ -1710,7 +1718,7 @@ }, "bandwidth":{ "shape":"Bandwidth", - "documentation":"

The port bandwidth, in Gbps. The possible values are 1 and 10.

" + "documentation":"

The port bandwidth, in Gbps. The possible values are 1, 10, and 100.

" }, "location":{ "shape":"LocationCode", @@ -1741,7 +1749,7 @@ "members":{ "numberOfConnections":{ "shape":"Count", - "documentation":"

The number of physical dedicated connections initially provisioned and bundled by the LAG. You can have a maximum of four connections when the port speed is 1G or 10G, or two when the port speed is 100G.

" + "documentation":"

The number of physical dedicated connections initially provisioned and bundled by the LAG. You can have a maximum of four connections when the port speed is 1Gbps or 10Gbps, or two when the port speed is 100Gbps or 400Gbps.

" }, "location":{ "shape":"LocationCode", @@ -1749,7 +1757,7 @@ }, "connectionsBandwidth":{ "shape":"Bandwidth", - "documentation":"

The bandwidth of the individual physical dedicated connections bundled by the LAG. The possible values are 1Gbps and 10Gbps.

" + "documentation":"

The bandwidth of the individual physical dedicated connections bundled by the LAG. The possible values are 1Gbps, 10Gbps, 100Gbps, and 400Gbps.

" }, "lagName":{ "shape":"LagName", @@ -1831,7 +1839,10 @@ "CreateTransitVirtualInterfaceResult":{ "type":"structure", "members":{ - "virtualInterface":{"shape":"VirtualInterface"} + "virtualInterface":{ + "shape":"VirtualInterface", + "documentation":"

Information about a virtual interface.

" + } } }, "CustomerAddress":{"type":"string"}, @@ -2743,11 +2754,11 @@ "members":{ "connectionsBandwidth":{ "shape":"Bandwidth", - "documentation":"

The individual bandwidth of the physical connections bundled by the LAG. The possible values are 1Gbps and 10Gbps.

" + "documentation":"

The individual bandwidth of the physical connections bundled by the LAG. The possible values are 1Gbps, 10Gbps, 100Gbps, or 400Gbps.

" }, "numberOfConnections":{ "shape":"Count", - "documentation":"

The number of physical dedicated connections bundled by the LAG, up to a maximum of 10.

" + "documentation":"

The number of physical dedicated connections initially provisioned and bundled by the LAG. You can have a maximum of four connections when the port speed is 1 Gbps or 10 Gbps, or two when the port speed is 100 Gbps or 400 Gbps.

" }, "lagId":{ "shape":"LagId", @@ -3043,7 +3054,7 @@ }, "mtu":{ "shape":"MTU", - "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

" + "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

" }, "authKey":{ "shape":"BGPAuthKey", @@ -3102,7 +3113,7 @@ }, "mtu":{ "shape":"MTU", - "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

" + "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

" }, "authKey":{ "shape":"BGPAuthKey", @@ -3590,7 +3601,10 @@ "UpdateDirectConnectGatewayAssociationResult":{ "type":"structure", "members":{ - "directConnectGatewayAssociation":{"shape":"DirectConnectGatewayAssociation"} + "directConnectGatewayAssociation":{ + "shape":"DirectConnectGatewayAssociation", + "documentation":"

Information about an association between a Direct Connect gateway and a virtual private gateway or transit gateway.

" + } } }, "UpdateDirectConnectGatewayRequest":{ @@ -3613,7 +3627,10 @@ "UpdateDirectConnectGatewayResponse":{ "type":"structure", "members":{ - "directConnectGateway":{"shape":"DirectConnectGateway"} + "directConnectGateway":{ + "shape":"DirectConnectGateway", + "documentation":"

Information about a Direct Connect gateway, which enables you to connect virtual interfaces and virtual private gateways or transit gateways.

" + } } }, "UpdateLagRequest":{ @@ -3648,7 +3665,7 @@ }, "mtu":{ "shape":"MTU", - "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

" + "documentation":"

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

" }, "enableSiteLink":{ "shape":"EnableSiteLink", @@ -3716,7 +3733,7 @@ }, "virtualInterfaceType":{ "shape":"VirtualInterfaceType", - "documentation":"

The type of virtual interface. The possible values are private and public.

" + "documentation":"

The type of virtual interface. The possible values are private, public and transit.

" }, "virtualInterfaceName":{ "shape":"VirtualInterfaceName", diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index 7589f5a234..cefe7292f5 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"dms", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Database Migration Service", "serviceId":"Database Migration Service", "signatureVersion":"v4", "targetPrefix":"AmazonDMSv20160101", - "uid":"dms-2016-01-01" + "uid":"dms-2016-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToResource":{ diff --git a/botocore/data/docdb/2014-10-31/service-2.json b/botocore/data/docdb/2014-10-31/service-2.json index 8cc37568bd..462207e810 100644 --- a/botocore/data/docdb/2014-10-31/service-2.json +++ b/botocore/data/docdb/2014-10-31/service-2.json @@ -4,13 +4,15 @@ "apiVersion":"2014-10-31", "endpointPrefix":"rds", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"Amazon DocDB", "serviceFullName":"Amazon DocumentDB with MongoDB compatibility", "serviceId":"DocDB", "signatureVersion":"v4", "signingName":"rds", "uid":"docdb-2014-10-31", - "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddSourceIdentifierToSubscription":{ @@ -638,6 +640,25 @@ ], "documentation":"

Forces a failover for a cluster.

A failover for a cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the cluster to be the primary instance (the cluster writer).

If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.

" }, + "FailoverGlobalCluster":{ + "name":"FailoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverGlobalClusterMessage"}, + "output":{ + "shape":"FailoverGlobalClusterResult", + "resultWrapper":"FailoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Promotes the specified secondary DB cluster to be the primary DB cluster in the global cluster when failing over a global cluster occurs.

Use this operation to respond to an unplanned event, such as a regional disaster in the primary region. Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1796,6 +1817,12 @@ }, "exception":true }, + "DBClusterIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z][0-9A-Za-z-:._]*" + }, "DBClusterList":{ "type":"list", "member":{ @@ -3360,6 +3387,37 @@ "DBCluster":{"shape":"DBCluster"} } }, + "FailoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

The identifier of the Amazon DocumentDB global cluster to apply this operation. The identifier is the unique key assigned by the user when the cluster is created. In other words, it's the name of the global cluster.

Constraints:

  • Must match the identifier of an existing global cluster.

  • Minimum length of 1. Maximum length of 255.

Pattern: [A-Za-z][0-9A-Za-z-:._]*

" + }, + "TargetDbClusterIdentifier":{ + "shape":"DBClusterIdentifier", + "documentation":"

The identifier of the secondary Amazon DocumentDB cluster that you want to promote to the primary for the global cluster. Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.

Constraints:

  • Must match the identifier of an existing secondary cluster.

  • Minimum length of 1. Maximum length of 255.

Pattern: [A-Za-z][0-9A-Za-z-:._]*

" + }, + "AllowDataLoss":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether to allow data loss for this global cluster operation. Allowing data loss triggers a global failover operation.

If you don't specify AllowDataLoss, the global cluster operation defaults to a switchover.

Constraints:

  • Can't be specified together with the Switchover parameter.

" + }, + "Switchover":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether to switch over this global database cluster.

Constraints:

  • Can't be specified together with the AllowDataLoss parameter.

" + } + } + }, + "FailoverGlobalClusterResult":{ + "type":"structure", + "members":{ + "GlobalCluster":{"shape":"GlobalCluster"} + } + }, "Filter":{ "type":"structure", "required":[ @@ -4805,7 +4863,7 @@ "documentation":"

The identifier of the Amazon DocumentDB global database cluster to switch over. The identifier is the unique key assigned by the user when the cluster is created. In other words, it's the name of the global cluster. This parameter isn’t case-sensitive.

Constraints:

  • Must match the identifier of an existing global cluster (Amazon DocumentDB global database).

  • Minimum length of 1. Maximum length of 255.

Pattern: [A-Za-z][0-9A-Za-z-:._]*

" }, "TargetDbClusterIdentifier":{ - "shape":"String", + "shape":"DBClusterIdentifier", "documentation":"

The identifier of the secondary Amazon DocumentDB cluster to promote to the new primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.

Constraints:

  • Must match the identifier of an existing secondary cluster.

  • Minimum length of 1. Maximum length of 255.

Pattern: [A-Za-z][0-9A-Za-z-:._]*

" } } diff --git a/botocore/data/ds/2015-04-16/endpoint-rule-set-1.json b/botocore/data/ds/2015-04-16/endpoint-rule-set-1.json index b16b407230..fdcdb95dcb 100644 --- a/botocore/data/ds/2015-04-16/endpoint-rule-set-1.json +++ b/botocore/data/ds/2015-04-16/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, 
{ "conditions": [], diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index 9a525d4efd..00e6e0435e 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"ds", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Directory Service", "serviceFullName":"AWS Directory Service", "serviceId":"Directory Service", "signatureVersion":"v4", "targetPrefix":"DirectoryService_20150416", - "uid":"ds-2015-04-16" + "uid":"ds-2015-04-16", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptSharedDirectory":{ diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index a8f3c338c9..9b95e4b201 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"dynamodb", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"DynamoDB", "serviceFullName":"Amazon DynamoDB", "serviceId":"DynamoDB", "signatureVersion":"v4", "targetPrefix":"DynamoDB_20120810", - "uid":"dynamodb-2012-08-10" + "uid":"dynamodb-2012-08-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchExecuteStatement":{ @@ -25,7 +27,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item.

The entire batch must consist of either read statements or write statements, you cannot mix both in one batch.

A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.

" + "documentation":"

This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB .

The entire batch must consist of either read statements or write statements, you cannot mix both in one batch.

A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.

" }, "BatchGetItem":{ "name":"BatchGetItem", @@ -60,7 +62,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.

BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

", + "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.

BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

  • Any individual item has keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.

", "endpointdiscovery":{ } }, @@ -98,7 +100,7 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

If local secondary indexes are specified, then the following conditions must also be met:

  • The local secondary indexes must have the same name.

  • The local secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", + "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

If local secondary indexes are specified, then the following conditions must also be met:

  • The local secondary indexes must have the same name.

  • The local secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", "endpointdiscovery":{ } }, @@ -191,7 +193,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

This operation only applies to Version 2019.11.21 (Current) of global tables.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.

", + "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.

", "endpointdiscovery":{ } }, @@ -279,7 +281,7 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"

Returns information about the specified global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Returns information about the specified global table.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

", "endpointdiscovery":{ } }, @@ -295,7 +297,7 @@ {"shape":"GlobalTableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes Region-specific settings for a global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Describes Region-specific settings for a global table.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

", "endpointdiscovery":{ } }, @@ -355,7 +357,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", + "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", "endpointdiscovery":{ } }, @@ -371,7 +373,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes auto scaling settings across replicas of the global table at once.

This operation only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

Describes auto scaling settings across replicas of the global table at once.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

" }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -586,7 +588,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all global tables that have a replica in the specified Region.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Lists all global tables that have a replica in the specified Region.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

", "endpointdiscovery":{ } }, @@ -871,7 +873,7 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use UpdateTable instead.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

", + "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

This operation only applies to global tables using Version 2017.11.29 (Legacy). If you are using global tables Version 2019.11.21 (Current) you can use UpdateTable instead.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

", "endpointdiscovery":{ } }, @@ -891,7 +893,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates settings for a global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", + "documentation":"

Updates settings for a global table.

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

", "endpointdiscovery":{ } }, @@ -948,7 +950,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", + "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", "endpointdiscovery":{ } }, @@ -966,7 +968,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates auto scaling settings on your global tables at once.

This operation only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

Updates auto scaling settings on your global tables at once.

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

" }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -1838,7 +1840,7 @@ "documentation":"

The amount of throughput consumed on each global index affected by the operation.

" } }, - "documentation":"

The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer Guide.

" }, "ConsumedCapacityMultiple":{ "type":"list", @@ -2077,7 +2079,7 @@ }, "BillingMode":{ "shape":"BillingMode", - "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode.

" + "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode.

" }, "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", @@ -2105,7 +2107,7 @@ }, "ResourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

" + "documentation":"

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

You need to specify the CreateTable and PutResourcePolicy IAM actions for authorizing a user to create a table with a resource-based policy.

" }, "OnDemandThroughput":{ "shape":"OnDemandThroughput", @@ -2281,7 +2283,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -3157,7 +3159,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a GetItem operation.

" @@ -4659,7 +4661,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -4798,7 +4800,7 @@ }, "ScannedCount":{ "shape":"Integer", - "documentation":"

The number of items evaluated, before any QueryFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Query operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

If you did not use a filter in the request, then ScannedCount is the same as Count.

" + "documentation":"

The number of items evaluated, before any QueryFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Query operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

If you did not use a filter in the request, then ScannedCount is the same as Count.

" }, "LastEvaluatedKey":{ "shape":"Key", @@ -4806,7 +4808,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a Query operation.

" @@ -5605,7 +5607,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } }, "documentation":"

Represents the output of a Scan operation.

" @@ -6388,7 +6390,7 @@ }, "GlobalTableBillingMode":{ "shape":"BillingMode", - "documentation":"

The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode.

" + "documentation":"

The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode.

" }, "GlobalTableProvisionedWriteCapacityUnits":{ "shape":"PositiveLongObject", @@ -6489,7 +6491,7 @@ }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", - "documentation":"

The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer Guide.

" }, "ItemCollectionMetrics":{ "shape":"ItemCollectionMetrics", @@ -6595,7 +6597,7 @@ }, "BillingMode":{ "shape":"BillingMode", - "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode.

" + "documentation":"

Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.

  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode.

" }, "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", @@ -6615,7 +6617,7 @@ }, "ReplicaUpdates":{ "shape":"ReplicationGroupUpdateList", - "documentation":"

A list of replica update actions (create, delete, or update) for the table.

This property only applies to Version 2019.11.21 (Current) of global tables.

" + "documentation":"

A list of replica update actions (create, delete, or update) for the table.

For global tables, this property only applies to global tables using Version 2019.11.21 (Current version).

" }, "TableClass":{ "shape":"TableClass", diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index a1c6a3446b..819978feaa 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -4,12 +4,14 @@ "apiVersion":"2016-11-15", "endpointPrefix":"ec2", "protocol":"ec2", + "protocols":["ec2"], "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", "serviceId":"EC2", "signatureVersion":"v4", "uid":"ec2-2016-11-15", - "xmlNamespace":"http://ec2.amazonaws.com/doc/2016-11-15" + "xmlNamespace":"http://ec2.amazonaws.com/doc/2016-11-15", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptAddressTransfer":{ @@ -20,7 +22,7 @@ }, "input":{"shape":"AcceptAddressTransferRequest"}, "output":{"shape":"AcceptAddressTransferResult"}, - "documentation":"

Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon VPC User Guide.

" }, "AcceptReservedInstancesExchangeQuote":{ "name":"AcceptReservedInstancesExchangeQuote", @@ -100,7 +102,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

" + "documentation":"

Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon EC2 User Guide.

If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation.

For more information, see Elastic IP Addresses in the Amazon EC2 User Guide.

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

" }, "AllocateHosts":{ "name":"AllocateHosts", @@ -140,7 +142,7 @@ }, "input":{"shape":"AssignIpv6AddressesRequest"}, "output":{"shape":"AssignIpv6AddressesResult"}, - "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

You must specify either the IPv6 addresses or the IPv6 address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type.

You must specify either the IPv6 addresses or the IPv6 address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

" }, "AssignPrivateIpAddresses":{ "name":"AssignPrivateIpAddresses", @@ -150,7 +152,7 @@ }, "input":{"shape":"AssignPrivateIpAddressesRequest"}, "output":{"shape":"AssignPrivateIpAddressesResult"}, - "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

You must specify either the IP addresses or the IP address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon EC2 User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

You must specify either the IP addresses or the IP address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

" }, "AssignPrivateNatGatewayAddress":{ "name":"AssignPrivateNatGatewayAddress", @@ -160,7 +162,7 @@ }, "input":{"shape":"AssignPrivateNatGatewayAddressRequest"}, "output":{"shape":"AssignPrivateNatGatewayAddressResult"}, - "documentation":"

Assigns one or more private IPv4 addresses to a private NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide.

" + "documentation":"

Assigns private IPv4 addresses to a private NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide.

" }, "AssociateAddress":{ "name":"AssociateAddress", @@ -189,7 +191,7 @@ "requestUri":"/" }, "input":{"shape":"AssociateDhcpOptionsRequest"}, - "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP options sets in the Amazon VPC User Guide.

" + "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP option sets in the Amazon VPC User Guide.

" }, "AssociateEnclaveCertificateIamRole":{ "name":"AssociateEnclaveCertificateIamRole", @@ -309,7 +311,7 @@ }, "input":{"shape":"AssociateTrunkInterfaceRequest"}, "output":{"shape":"AssociateTrunkInterfaceResult"}, - "documentation":"

Associates a branch network interface with a trunk network interface.

Before you create the association, run the create-network-interface command and set --interface-type to trunk. You must also create a network interface for each branch network interface that you want to associate with the trunk network interface.

" + "documentation":"

Associates a branch network interface with a trunk network interface.

Before you create the association, use the CreateNetworkInterface command and set the interface type to trunk. You must also create a network interface for each branch network interface that you want to associate with the trunk network interface.

" }, "AssociateVpcCidrBlock":{ "name":"AssociateVpcCidrBlock", @@ -319,7 +321,7 @@ }, "input":{"shape":"AssociateVpcCidrBlockRequest"}, "output":{"shape":"AssociateVpcCidrBlockResult"}, - "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP).

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see IP addressing for your VPCs and subnets in the Amazon VPC User Guide.

" + "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP).

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see IP addressing for your VPCs and subnets in the Amazon VPC User Guide.

" }, "AttachClassicLinkVpc":{ "name":"AttachClassicLinkVpc", @@ -496,7 +498,7 @@ }, "input":{"shape":"CancelReservedInstancesListingRequest"}, "output":{"shape":"CancelReservedInstancesListingResult"}, - "documentation":"

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide.

" + "documentation":"

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide.

" }, "CancelSpotFleetRequests":{ "name":"CancelSpotFleetRequests", @@ -556,7 +558,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default Key Management Service (KMS) KMS key; however, you can specify a different KMS key. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the KMS key used to encrypt the snapshot.

Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copy an Amazon EBS snapshot in the Amazon EBS User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default KMS key; however, you can specify a different KMS key. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the KMS key used to encrypt the snapshot.

Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copy an Amazon EBS snapshot in the Amazon EBS User Guide.

" }, "CreateCapacityReservation":{ "name":"CreateCapacityReservation", @@ -568,6 +570,16 @@ "output":{"shape":"CreateCapacityReservationResult"}, "documentation":"

Creates a new Capacity Reservation with the specified attributes.

Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon EC2 User Guide.

Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.

Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Quotas in the Amazon EC2 User Guide.

" }, + "CreateCapacityReservationBySplitting":{ + "name":"CreateCapacityReservationBySplitting", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCapacityReservationBySplittingRequest"}, + "output":{"shape":"CreateCapacityReservationBySplittingResult"}, + "documentation":"

Create a new Capacity Reservation by splitting the available capacity of the source Capacity Reservation. The new Capacity Reservation will have the same attributes as the source Capacity Reservation except for tags. The source Capacity Reservation must be active and owned by your Amazon Web Services account.

" + }, "CreateCapacityReservationFleet":{ "name":"CreateCapacityReservationFleet", "http":{ @@ -576,7 +588,7 @@ }, "input":{"shape":"CreateCapacityReservationFleetRequest"}, "output":{"shape":"CreateCapacityReservationFleetResult"}, - "documentation":"

Creates a Capacity Reservation Fleet. For more information, see Create a Capacity Reservation Fleet in the Amazon EC2 User Guide.

" + "documentation":"

Creates a Capacity Reservation Fleet. For more information, see Create a Capacity Reservation Fleet in the Amazon EC2 User Guide.

" }, "CreateCarrierGateway":{ "name":"CreateCarrierGateway", @@ -666,7 +678,7 @@ }, "input":{"shape":"CreateDhcpOptionsRequest"}, "output":{"shape":"CreateDhcpOptionsResult"}, - "documentation":"

Creates a custom set of DHCP options. After you create a DHCP option set, you associate it with a VPC. After you associate a DHCP option set with a VPC, all existing and newly launched instances in the VPC use this set of DHCP options.

The following are the individual DHCP options you can specify. For more information, see DHCP options sets in the Amazon VPC User Guide.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames.

    Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name.

  • domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server.

  • ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses).

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132.

  • ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.

" + "documentation":"

Creates a custom set of DHCP options. After you create a DHCP option set, you associate it with a VPC. After you associate a DHCP option set with a VPC, all existing and newly launched instances in the VPC use this set of DHCP options.

The following are the individual DHCP options you can specify. For more information, see DHCP option sets in the Amazon VPC User Guide.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames.

    Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name.

  • domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server.

  • ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses).

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132.

  • ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.

" }, "CreateEgressOnlyInternetGateway":{ "name":"CreateEgressOnlyInternetGateway", @@ -696,7 +708,7 @@ }, "input":{"shape":"CreateFlowLogsRequest"}, "output":{"shape":"CreateFlowLogsResult"}, - "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow log records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow log records in the Amazon VPC User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon VPC User Guide.

" }, "CreateFpgaImage":{ "name":"CreateFpgaImage", @@ -768,6 +780,16 @@ "output":{"shape":"CreateIpamResult"}, "documentation":"

Create an IPAM. Amazon VPC IP Address Manager (IPAM) is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization.

For more information, see Create an IPAM in the Amazon VPC IPAM User Guide.

" }, + "CreateIpamExternalResourceVerificationToken":{ + "name":"CreateIpamExternalResourceVerificationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIpamExternalResourceVerificationTokenRequest"}, + "output":{"shape":"CreateIpamExternalResourceVerificationTokenResult"}, + "documentation":"

Create a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).

" + }, "CreateIpamPool":{ "name":"CreateIpamPool", "http":{ @@ -816,7 +838,7 @@ }, "input":{"shape":"CreateLaunchTemplateRequest"}, "output":{"shape":"CreateLaunchTemplateResult"}, - "documentation":"

Creates a launch template.

A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launch an instance from a launch template in the Amazon Elastic Compute Cloud User Guide.

To clone an existing launch template as the basis for a new launch template, use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more information, see Create a launch template from an existing launch template in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a launch template.

A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launch an instance from a launch template in the Amazon EC2 User Guide.

To clone an existing launch template as the basis for a new launch template, use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more information, see Create a launch template from an existing launch template in the Amazon EC2 User Guide.

" }, "CreateLaunchTemplateVersion":{ "name":"CreateLaunchTemplateVersion", @@ -826,7 +848,7 @@ }, "input":{"shape":"CreateLaunchTemplateVersionRequest"}, "output":{"shape":"CreateLaunchTemplateVersionResult"}, - "documentation":"

Creates a new version of a launch template. You must specify an existing launch template, either by name or ID. You can determine whether the new version inherits parameters from a source version, and add or overwrite parameters as needed.

Launch template versions are numbered in the order in which they are created. You can't specify, change, or replace the numbering of launch template versions.

Launch templates are immutable; after you create a launch template, you can't modify it. Instead, you can create a new version of the launch template that includes the changes that you require.

For more information, see Modify a launch template (manage launch template versions) in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a new version of a launch template. You must specify an existing launch template, either by name or ID. You can determine whether the new version inherits parameters from a source version, and add or overwrite parameters as needed.

Launch template versions are numbered in the order in which they are created. You can't specify, change, or replace the numbering of launch template versions.

Launch templates are immutable; after you create a launch template, you can't modify it. Instead, you can create a new version of the launch template that includes the changes that you require.

For more information, see Modify a launch template (manage launch template versions) in the Amazon EC2 User Guide.

" }, "CreateLocalGatewayRoute":{ "name":"CreateLocalGatewayRoute", @@ -935,7 +957,7 @@ }, "input":{"shape":"CreateNetworkInterfaceRequest"}, "output":{"shape":"CreateNetworkInterfaceResult"}, - "documentation":"

Creates a network interface in the specified subnet.

The number of IP addresses you can assign to a network interface varies by instance type. For more information, see IP Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide.

For more information about network interfaces, see Elastic network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a network interface in the specified subnet.

The number of IP addresses you can assign to a network interface varies by instance type.

For more information about network interfaces, see Elastic network interfaces in the Amazon EC2 User Guide.

" }, "CreateNetworkInterfacePermission":{ "name":"CreateNetworkInterfacePermission", @@ -975,7 +997,7 @@ }, "input":{"shape":"CreateReplaceRootVolumeTaskRequest"}, "output":{"shape":"CreateReplaceRootVolumeTaskResult"}, - "documentation":"

Replaces the EBS-backed root volume for a running instance with a new volume that is restored to the original root volume's launch state, that is restored to a specific snapshot taken from the original root volume, or that is restored from an AMI that has the same key characteristics as that of the instance.

For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Replaces the EBS-backed root volume for a running instance with a new volume that is restored to the original root volume's launch state, that is restored to a specific snapshot taken from the original root volume, or that is restored from an AMI that has the same key characteristics as that of the instance.

For more information, see Replace a root volume in the Amazon EC2 User Guide.

" }, "CreateReservedInstancesListing":{ "name":"CreateReservedInstancesListing", @@ -985,7 +1007,7 @@ }, "input":{"shape":"CreateReservedInstancesListingRequest"}, "output":{"shape":"CreateReservedInstancesListingResult"}, - "documentation":"

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your Standard Reserved Instances, you can use the DescribeReservedInstances operation.

Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances cannot be sold.

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide.

" + "documentation":"

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your Standard Reserved Instances, you can use the DescribeReservedInstances operation.

Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances cannot be sold.

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide.

" }, "CreateRestoreImageTask":{ "name":"CreateRestoreImageTask", @@ -1035,7 +1057,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon EBS User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide.

For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide.

" }, "CreateSnapshots":{ "name":"CreateSnapshots", @@ -1055,7 +1077,7 @@ }, "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, "output":{"shape":"CreateSpotDatafeedSubscriptionResult"}, - "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide.

" }, "CreateStoreImageTask":{ "name":"CreateStoreImageTask", @@ -1085,7 +1107,7 @@ }, "input":{"shape":"CreateSubnetCidrReservationRequest"}, "output":{"shape":"CreateSubnetCidrReservationResult"}, - "documentation":"

Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide and Assign prefixes to network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon VPC User Guide and Assign prefixes to network interfaces in the Amazon EC2 User Guide.

" }, "CreateTags":{ "name":"CreateTags", @@ -1164,7 +1186,7 @@ }, "input":{"shape":"CreateTransitGatewayConnectPeerRequest"}, "output":{"shape":"CreateTransitGatewayConnectPeerResult"}, - "documentation":"

Creates a Connect peer for a specified transit gateway Connect attachment between a transit gateway and an appliance.

The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6).

For more information, see Connect peers in the Transit Gateways Guide.

" + "documentation":"

Creates a Connect peer for a specified transit gateway Connect attachment between a transit gateway and an appliance.

The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6).

For more information, see Connect peers in the Amazon Web Services Transit Gateways Guide.

" }, "CreateTransitGatewayMulticastDomain":{ "name":"CreateTransitGatewayMulticastDomain", @@ -1294,7 +1316,7 @@ }, "input":{"shape":"CreateVolumeRequest"}, "output":{"shape":"Volume"}, - "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

You can create a new empty volume or restore a volume from an EBS snapshot. Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.

You can tag your volumes during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Create an Amazon EBS volume in the Amazon EBS User Guide.

" + "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

You can create a new empty volume or restore a volume from an EBS snapshot. Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.

You can tag your volumes during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide.

For more information, see Create an Amazon EBS volume in the Amazon EBS User Guide.

" }, "CreateVpc":{ "name":"CreateVpc", @@ -1324,7 +1346,7 @@ }, "input":{"shape":"CreateVpcEndpointConnectionNotificationRequest"}, "output":{"shape":"CreateVpcEndpointConnectionNotificationResult"}, - "documentation":"

Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.

You can create a connection notification for interface endpoints only.

" + "documentation":"

Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Creating an Amazon SNS topic in the Amazon SNS Developer Guide.

You can create a connection notification for interface endpoints only.

" }, "CreateVpcEndpointServiceConfiguration":{ "name":"CreateVpcEndpointServiceConfiguration", @@ -1344,7 +1366,7 @@ }, "input":{"shape":"CreateVpcPeeringConnectionRequest"}, "output":{"shape":"CreateVpcPeeringConnectionResult"}, - "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" + "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the VPC peering limitations in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" }, "CreateVpnConnection":{ "name":"CreateVpnConnection", @@ -1522,6 +1544,16 @@ "output":{"shape":"DeleteIpamResult"}, "documentation":"

Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.

For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide.

" }, + "DeleteIpamExternalResourceVerificationToken":{ + "name":"DeleteIpamExternalResourceVerificationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIpamExternalResourceVerificationTokenRequest"}, + "output":{"shape":"DeleteIpamExternalResourceVerificationTokenResult"}, + "documentation":"

Delete a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).

" + }, "DeleteIpamPool":{ "name":"DeleteIpamPool", "http":{ @@ -1580,7 +1612,7 @@ }, "input":{"shape":"DeleteLaunchTemplateVersionsRequest"}, "output":{"shape":"DeleteLaunchTemplateVersionsResult"}, - "documentation":"

Deletes one or more versions of a launch template.

You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions.

For more information, see Delete a launch template version in the EC2 User Guide.

" + "documentation":"

Deletes one or more versions of a launch template.

You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions.

For more information, see Delete a launch template version in the Amazon EC2 User Guide.

" }, "DeleteLocalGatewayRoute":{ "name":"DeleteLocalGatewayRoute", @@ -2193,7 +2225,7 @@ }, "input":{"shape":"DescribeAddressTransfersRequest"}, "output":{"shape":"DescribeAddressTransfersResult"}, - "documentation":"

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted.

" + "documentation":"

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for 14 days after the transfers have been accepted.

" }, "DescribeAddresses":{ "name":"DescribeAddresses", @@ -2233,7 +2265,7 @@ }, "input":{"shape":"DescribeAvailabilityZonesRequest"}, "output":{"shape":"DescribeAvailabilityZonesResult"}, - "documentation":"

Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone.

For more information about Availability Zones, Local Zones, and Wavelength Zones, see Regions and zones in the Amazon Elastic Compute Cloud User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone.

For more information about Availability Zones, Local Zones, and Wavelength Zones, see Regions and zones in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeAwsNetworkPerformanceMetricSubscriptions":{ "name":"DescribeAwsNetworkPerformanceMetricSubscriptions", @@ -2313,7 +2345,7 @@ }, "input":{"shape":"DescribeClassicLinkInstancesRequest"}, "output":{"shape":"DescribeClassicLinkInstancesResult"}, - "documentation":"

This action is deprecated.

Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use this request to return information about other instances.

" + "documentation":"

This action is deprecated.

Describes your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use this request to return information about other instances.

" }, "DescribeClientVpnAuthorizationRules":{ "name":"DescribeClientVpnAuthorizationRules", @@ -2403,7 +2435,7 @@ }, "input":{"shape":"DescribeDhcpOptionsRequest"}, "output":{"shape":"DescribeDhcpOptionsResult"}, - "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP options sets in the Amazon VPC User Guide.

" + "documentation":"

Describes your DHCP option sets. The default is to describe all your DHCP option sets. Alternatively, you can specify specific DHCP option set IDs or filter the results to include only the DHCP option sets that match specific criteria.

For more information, see DHCP option sets in the Amazon VPC User Guide.

" }, "DescribeEgressOnlyInternetGateways":{ "name":"DescribeEgressOnlyInternetGateways", @@ -2413,7 +2445,7 @@ }, "input":{"shape":"DescribeEgressOnlyInternetGatewaysRequest"}, "output":{"shape":"DescribeEgressOnlyInternetGatewaysResult"}, - "documentation":"

Describes one or more of your egress-only internet gateways.

" + "documentation":"

Describes your egress-only internet gateways. The default is to describe all your egress-only internet gateways. Alternatively, you can specify specific egress-only internet gateway IDs or filter the results to include only the egress-only internet gateways that match specific criteria.

" }, "DescribeElasticGpus":{ "name":"DescribeElasticGpus", @@ -2423,7 +2455,7 @@ }, "input":{"shape":"DescribeElasticGpusRequest"}, "output":{"shape":"DescribeElasticGpusResult"}, - "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances.

Describes the Elastic Graphics accelerator associated with your instances.

" }, "DescribeExportImageTasks":{ "name":"DescribeExportImageTasks", @@ -2733,7 +2765,7 @@ }, "input":{"shape":"DescribeInternetGatewaysRequest"}, "output":{"shape":"DescribeInternetGatewaysResult"}, - "documentation":"

Describes one or more of your internet gateways.

" + "documentation":"

Describes your internet gateways. The default is to describe all your internet gateways. Alternatively, you can specify specific internet gateway IDs or filter the results to include only the internet gateways that match specific criteria.

" }, "DescribeIpamByoasn":{ "name":"DescribeIpamByoasn", @@ -2745,6 +2777,16 @@ "output":{"shape":"DescribeIpamByoasnResult"}, "documentation":"

Describes your Autonomous System Numbers (ASNs), their provisioning statuses, and the BYOIP CIDRs with which they are associated. For more information, see Tutorial: Bring your ASN to IPAM in the Amazon VPC IPAM guide.

" }, + "DescribeIpamExternalResourceVerificationTokens":{ + "name":"DescribeIpamExternalResourceVerificationTokens", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIpamExternalResourceVerificationTokensRequest"}, + "output":{"shape":"DescribeIpamExternalResourceVerificationTokensResult"}, + "documentation":"

Describe verification tokens. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).

" + }, "DescribeIpamPools":{ "name":"DescribeIpamPools", "http":{ @@ -2943,7 +2985,7 @@ }, "input":{"shape":"DescribeNatGatewaysRequest"}, "output":{"shape":"DescribeNatGatewaysResult"}, - "documentation":"

Describes one or more of your NAT gateways.

" + "documentation":"

Describes your NAT gateways. The default is to describe all your NAT gateways. Alternatively, you can specify specific NAT gateway IDs or filter the results to include only the NAT gateways that match specific criteria.

" }, "DescribeNetworkAcls":{ "name":"DescribeNetworkAcls", @@ -2953,7 +2995,7 @@ }, "input":{"shape":"DescribeNetworkAclsRequest"}, "output":{"shape":"DescribeNetworkAclsResult"}, - "documentation":"

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon VPC User Guide.

" + "documentation":"

Describes your network ACLs. The default is to describe all your network ACLs. Alternatively, you can specify specific network ACL IDs or filter the results to include only the network ACLs that match specific criteria.

For more information, see Network ACLs in the Amazon VPC User Guide.

" }, "DescribeNetworkInsightsAccessScopeAnalyses":{ "name":"DescribeNetworkInsightsAccessScopeAnalyses", @@ -3033,7 +3075,7 @@ }, "input":{"shape":"DescribePlacementGroupsRequest"}, "output":{"shape":"DescribePlacementGroupsResult"}, - "documentation":"

Describes the specified placement groups or all of your placement groups. For more information, see Placement groups in the Amazon EC2 User Guide.

" + "documentation":"

Describes the specified placement groups or all of your placement groups.

To describe a specific placement group that is shared with your account, you must specify the ID of the placement group using the GroupId parameter. Specifying the name of a shared placement group using the GroupNames parameter will result in an error.

For more information, see Placement groups in the Amazon EC2 User Guide.

" }, "DescribePrefixLists":{ "name":"DescribePrefixLists", @@ -3073,7 +3115,7 @@ }, "input":{"shape":"DescribeRegionsRequest"}, "output":{"shape":"DescribeRegionsResult"}, - "documentation":"

Describes the Regions that are enabled for your account, or all Regions.

For a list of the Regions supported by Amazon EC2, see Amazon Elastic Compute Cloud endpoints and quotas.

For information about enabling and disabling Regions for your account, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes the Regions that are enabled for your account, or all Regions.

For a list of the Regions supported by Amazon EC2, see Amazon EC2 service endpoints.

For information about enabling and disabling Regions for your account, see Specify which Amazon Web Services Regions your account can use in the Amazon Web Services Account Management Reference Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeReplaceRootVolumeTasks":{ "name":"DescribeReplaceRootVolumeTasks", @@ -3083,7 +3125,7 @@ }, "input":{"shape":"DescribeReplaceRootVolumeTasksRequest"}, "output":{"shape":"DescribeReplaceRootVolumeTasksResult"}, - "documentation":"

Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon EC2 User Guide.

" }, "DescribeReservedInstances":{ "name":"DescribeReservedInstances", @@ -3103,7 +3145,7 @@ }, "input":{"shape":"DescribeReservedInstancesListingsRequest"}, "output":{"shape":"DescribeReservedInstancesListingsResult"}, - "documentation":"

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeReservedInstancesModifications":{ "name":"DescribeReservedInstancesModifications", @@ -3113,7 +3155,7 @@ }, "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, "output":{"shape":"DescribeReservedInstancesModificationsResult"}, - "documentation":"

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

For more information, see Modify Reserved Instances in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeReservedInstancesOfferings":{ "name":"DescribeReservedInstancesOfferings", @@ -3123,7 +3165,7 @@ }, "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, "output":{"shape":"DescribeReservedInstancesOfferingsResult"}, - "documentation":"

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeRouteTables":{ "name":"DescribeRouteTables", @@ -3133,7 +3175,7 @@ }, "input":{"shape":"DescribeRouteTablesRequest"}, "output":{"shape":"DescribeRouteTablesResult"}, - "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route tables in the Amazon VPC User Guide.

" + "documentation":"

Describes your route tables. The default is to describe all your route tables. Alternatively, you can specify specific route table IDs or filter the results to include only the route tables that match specific criteria.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route tables in the Amazon VPC User Guide.

" }, "DescribeScheduledInstanceAvailability":{ "name":"DescribeScheduledInstanceAvailability", @@ -3223,7 +3265,7 @@ }, "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"}, - "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance data feed in the Amazon EC2 User Guide.

" }, "DescribeSpotFleetInstances":{ "name":"DescribeSpotFleetInstances", @@ -3273,7 +3315,7 @@ }, "input":{"shape":"DescribeSpotPriceHistoryRequest"}, "output":{"shape":"DescribeSpotPriceHistoryResult"}, - "documentation":"

Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide for Linux Instances.

When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time.

" + "documentation":"

Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide.

When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time.

" }, "DescribeStaleSecurityGroups":{ "name":"DescribeStaleSecurityGroups", @@ -3303,7 +3345,7 @@ }, "input":{"shape":"DescribeSubnetsRequest"}, "output":{"shape":"DescribeSubnetsResult"}, - "documentation":"

Describes one or more of your subnets.

For more information, see Subnets in the Amazon VPC User Guide.

" + "documentation":"

Describes your subnets. The default is to describe all your subnets. Alternatively, you can specify specific subnet IDs or filter the results to include only the subnets that match specific criteria.

For more information, see Subnets in the Amazon VPC User Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -3315,6 +3357,16 @@ "output":{"shape":"DescribeTagsResult"}, "documentation":"

Describes the specified tags for your EC2 resources.

For more information about tags, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, + "DescribeTrafficMirrorFilterRules":{ + "name":"DescribeTrafficMirrorFilterRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrafficMirrorFilterRulesRequest"}, + "output":{"shape":"DescribeTrafficMirrorFilterRulesResult"}, + "documentation":"

Describes the traffic mirror filter rules that determine the traffic that is mirrored.

" + }, "DescribeTrafficMirrorFilters":{ "name":"DescribeTrafficMirrorFilters", "http":{ @@ -3543,7 +3595,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide.

" + "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -3633,7 +3685,7 @@ }, "input":{"shape":"DescribeVpcEndpointsRequest"}, "output":{"shape":"DescribeVpcEndpointsResult"}, - "documentation":"

Describes your VPC endpoints.

" + "documentation":"

Describes your VPC endpoints. The default is to describe all your VPC endpoints. Alternatively, you can specify specific VPC endpoint IDs or filter the results to include only the VPC endpoints that match specific criteria.

" }, "DescribeVpcPeeringConnections":{ "name":"DescribeVpcPeeringConnections", @@ -3643,7 +3695,7 @@ }, "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, "output":{"shape":"DescribeVpcPeeringConnectionsResult"}, - "documentation":"

Describes one or more of your VPC peering connections.

" + "documentation":"

Describes your VPC peering connections. The default is to describe all your VPC peering connections. Alternatively, you can specify specific VPC peering connection IDs or filter the results to include only the VPC peering connections that match specific criteria.

" }, "DescribeVpcs":{ "name":"DescribeVpcs", @@ -3653,7 +3705,7 @@ }, "input":{"shape":"DescribeVpcsRequest"}, "output":{"shape":"DescribeVpcsResult"}, - "documentation":"

Describes one or more of your VPCs.

" + "documentation":"

Describes your VPCs. The default is to describe all your VPCs. Alternatively, you can specify specific VPC IDs or filter the results to include only the VPCs that match specific criteria.

" }, "DescribeVpnConnections":{ "name":"DescribeVpnConnections", @@ -3740,7 +3792,7 @@ }, "input":{"shape":"DisableAddressTransferRequest"}, "output":{"shape":"DisableAddressTransferResult"}, - "documentation":"

Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" }, "DisableAwsNetworkPerformanceMetricSubscription":{ "name":"DisableAwsNetworkPerformanceMetricSubscription", @@ -4047,7 +4099,7 @@ }, "input":{"shape":"EnableAddressTransferRequest"}, "output":{"shape":"EnableAddressTransferResult"}, - "documentation":"

Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" }, "EnableAwsNetworkPerformanceMetricSubscription":{ "name":"EnableAwsNetworkPerformanceMetricSubscription", @@ -4255,7 +4307,7 @@ }, "input":{"shape":"ExportTransitGatewayRoutesRequest"}, "output":{"shape":"ExportTransitGatewayRoutesResult"}, - "documentation":"

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

The routes are saved to the specified bucket in a JSON file. For more information, see Export Route Tables to Amazon S3 in Transit Gateways.

" + "documentation":"

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

The routes are saved to the specified bucket in a JSON file. For more information, see Export route tables to Amazon S3 in the Amazon Web Services Transit Gateways Guide.

" }, "GetAssociatedEnclaveCertificateIamRoles":{ "name":"GetAssociatedEnclaveCertificateIamRoles", @@ -4315,7 +4367,7 @@ }, "input":{"shape":"GetConsoleOutputRequest"}, "output":{"shape":"GetConsoleOutputResult"}, - "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance console output in the Amazon EC2 User Guide.

" + "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

For more information, see Instance console output in the Amazon EC2 User Guide.

" }, "GetConsoleScreenshot":{ "name":"GetConsoleScreenshot", @@ -4485,7 +4537,7 @@ }, "input":{"shape":"GetIpamPoolAllocationsRequest"}, "output":{"shape":"GetIpamPoolAllocationsResult"}, - "documentation":"

Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

" + "documentation":"

Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

" }, "GetIpamPoolCidrs":{ "name":"GetIpamPoolCidrs", @@ -4565,7 +4617,7 @@ }, "input":{"shape":"GetPasswordDataRequest"}, "output":{"shape":"GetPasswordDataResult"}, - "documentation":"

Retrieves the encrypted administrator password for a running Windows instance.

The Windows password is generated at boot by the EC2Config service or EC2Launch scripts (Windows Server 2016 and later). This usually only happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the Amazon EC2 User Guide.

For the EC2Config service, the password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

When you launch an instance, password generation and encryption may take a few minutes. If you try to retrieve the password before it's available, the output returns an empty string. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

" + "documentation":"

Retrieves the encrypted administrator password for a running Windows instance.

The Windows password is generated at boot by the EC2Config service or EC2Launch scripts (Windows Server 2016 and later). This usually only happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the Amazon EC2 User Guide.

For the EC2Config service, the password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

When you launch an instance, password generation and encryption may take a few minutes. If you try to retrieve the password before it's available, the output returns an empty string. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

" }, "GetReservedInstancesExchangeQuote":{ "name":"GetReservedInstancesExchangeQuote", @@ -4615,7 +4667,7 @@ }, "input":{"shape":"GetSpotPlacementScoresRequest"}, "output":{"shape":"GetSpotPlacementScoresResult"}, - "documentation":"

Calculates the Spot placement score for a Region or Availability Zone based on the specified target capacity and compute requirements.

You can specify your compute requirements either by using InstanceRequirementsWithMetadata and letting Amazon EC2 choose the optimal instance types to fulfill your Spot request, or you can specify the instance types by using InstanceTypes.

For more information, see Spot placement score in the Amazon EC2 User Guide.

" + "documentation":"

Calculates the Spot placement score for a Region or Availability Zone based on the specified target capacity and compute requirements.

You can specify your compute requirements either by using InstanceRequirementsWithMetadata and letting Amazon EC2 choose the optimal instance types to fulfill your Spot request, or you can specify the instance types by using InstanceTypes.

For more information, see Spot placement score in the Amazon EC2 User Guide.

" }, "GetSubnetCidrReservations":{ "name":"GetSubnetCidrReservations", @@ -4855,7 +4907,7 @@ }, "input":{"shape":"ModifyAvailabilityZoneGroupRequest"}, "output":{"shape":"ModifyAvailabilityZoneGroupResult"}, - "documentation":"

Changes the opt-in status of the Local Zone and Wavelength Zone group for your account.

Use DescribeAvailabilityZones to view the value for GroupName.

" + "documentation":"

Changes the opt-in status of the specified zone group for your account.

" }, "ModifyCapacityReservation":{ "name":"ModifyCapacityReservation", @@ -4865,7 +4917,7 @@ }, "input":{"shape":"ModifyCapacityReservationRequest"}, "output":{"shape":"ModifyCapacityReservationResult"}, - "documentation":"

Modifies a Capacity Reservation's capacity and the conditions under which it is to be released. You cannot change a Capacity Reservation's instance type, EBS optimization, instance store settings, platform, Availability Zone, or instance eligibility. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with the required attributes.

" + "documentation":"

Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with the required attributes. For more information, see Modify an active Capacity Reservation.

" }, "ModifyCapacityReservationFleet":{ "name":"ModifyCapacityReservationFleet", @@ -5160,7 +5212,7 @@ }, "input":{"shape":"ModifyReservedInstancesRequest"}, "output":{"shape":"ModifyReservedInstancesResult"}, - "documentation":"

Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide.

" + "documentation":"

Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

For more information, see Modify Reserved Instances in the Amazon EC2 User Guide.

" }, "ModifySecurityGroupRules":{ "name":"ModifySecurityGroupRules", @@ -5348,7 +5400,7 @@ }, "input":{"shape":"ModifyVolumeRequest"}, "output":{"shape":"ModifyVolumeResult"}, - "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes in the Amazon EBS User Guide.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For more information, see Extend the file system.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitor the progress of volume modifications.

With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance.

After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available state before you can modify the same volume. This is sometimes referred to as a cooldown period.

" + "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes in the Amazon EBS User Guide.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For more information, see Extend the file system.

For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide.

With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance.

After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available state before you can modify the same volume. This is sometimes referred to as a cooldown period.

" }, "ModifyVolumeAttribute":{ "name":"ModifyVolumeAttribute", @@ -5508,6 +5560,16 @@ "output":{"shape":"MoveByoipCidrToIpamResult"}, "documentation":"

Move a BYOIPv4 CIDR to IPAM from a public IPv4 pool.

If you already have a BYOIPv4 CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.

" }, + "MoveCapacityReservationInstances":{ + "name":"MoveCapacityReservationInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveCapacityReservationInstancesRequest"}, + "output":{"shape":"MoveCapacityReservationInstancesResult"}, + "documentation":"

Move available capacity from a source Capacity Reservation to a destination Capacity Reservation. The source Capacity Reservation and the destination Capacity Reservation must be active, owned by your Amazon Web Services account, and share the following:

  • Instance type

  • Platform

  • Availability Zone

  • Tenancy

  • Placement group

  • Capacity Reservation end time - at a specific time or manually.

" + }, "ProvisionByoipCidr":{ "name":"ProvisionByoipCidr", "http":{ @@ -5516,7 +5578,7 @@ }, "input":{"shape":"ProvisionByoipCidrRequest"}, "output":{"shape":"ProvisionByoipCidrResult"}, - "documentation":"

Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring your own IP addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.

" + "documentation":"

Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring your own IP addresses (BYOIP) in the Amazon EC2 User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.

" }, "ProvisionIpamByoasn":{ "name":"ProvisionIpamByoasn", @@ -5576,7 +5638,7 @@ }, "input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, "output":{"shape":"PurchaseReservedInstancesOfferingResult"}, - "documentation":"

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower hourly rate compared to On-Demand instance pricing.

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

To queue a purchase for a future date and time, specify a purchase time. If you do not specify a purchase time, the default is the current time.

For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon EC2 User Guide.

" + "documentation":"

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower hourly rate compared to On-Demand instance pricing.

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

To queue a purchase for a future date and time, specify a purchase time. If you do not specify a purchase time, the default is the current time.

For more information, see Reserved Instances and Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide.

" }, "PurchaseScheduledInstances":{ "name":"PurchaseScheduledInstances", @@ -5625,7 +5687,7 @@ }, "input":{"shape":"RegisterTransitGatewayMulticastGroupMembersRequest"}, "output":{"shape":"RegisterTransitGatewayMulticastGroupMembersResult"}, - "documentation":"

Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated with a supported EC2 instance that receives multicast traffic. For information about supported instances, see Multicast Consideration in Amazon VPC Transit Gateways.

After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added to the transit gateway multicast group.

" + "documentation":"

Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated with a supported EC2 instance that receives multicast traffic. For more information, see Multicast on transit gateways in the Amazon Web Services Transit Gateways Guide.

After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added to the transit gateway multicast group.

" }, "RegisterTransitGatewayMulticastGroupSources":{ "name":"RegisterTransitGatewayMulticastGroupSources", @@ -5635,7 +5697,7 @@ }, "input":{"shape":"RegisterTransitGatewayMulticastGroupSourcesRequest"}, "output":{"shape":"RegisterTransitGatewayMulticastGroupSourcesResult"}, - "documentation":"

Registers sources (network interfaces) with the specified transit gateway multicast group.

A multicast source is a network interface attached to a supported instance that sends multicast traffic. For information about supported instances, see Multicast Considerations in Amazon VPC Transit Gateways.

After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast group.

" + "documentation":"

Registers sources (network interfaces) with the specified transit gateway multicast group.

A multicast source is a network interface attached to a supported instance that sends multicast traffic. For more information about supported instances, see Multicast on transit gateways in the Amazon Web Services Transit Gateways Guide.

After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast group.

" }, "RejectTransitGatewayMulticastDomainAssociations":{ "name":"RejectTransitGatewayMulticastDomainAssociations", @@ -5714,7 +5776,7 @@ }, "input":{"shape":"ReleaseIpamPoolAllocationRequest"}, "output":{"shape":"ReleaseIpamPoolAllocationResult"}, - "documentation":"

Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

All EC2 API actions follow an eventual consistency model.

" + "documentation":"

Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

All EC2 API actions follow an eventual consistency model.

" }, "ReplaceIamInstanceProfileAssociation":{ "name":"ReplaceIamInstanceProfileAssociation", @@ -5811,7 +5873,7 @@ }, "input":{"shape":"RequestSpotInstancesRequest"}, "output":{"shape":"RequestSpotInstancesResult"}, - "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance requests in the Amazon EC2 User Guide for Linux Instances.

We strongly discourage using the RequestSpotInstances API because it is a legacy API with no planned investment. For options for requesting Spot Instances, see Which is the best Spot request method to use? in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Instance request.

For more information, see Work with Spot Instances in the Amazon EC2 User Guide.

We strongly discourage using the RequestSpotInstances API because it is a legacy API with no planned investment. For options for requesting Spot Instances, see Which is the best Spot request method to use? in the Amazon EC2 User Guide.

" }, "ResetAddressAttribute":{ "name":"ResetAddressAttribute", @@ -5859,7 +5921,7 @@ "requestUri":"/" }, "input":{"shape":"ResetInstanceAttributeRequest"}, - "documentation":"

Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon VPC User Guide.

" + "documentation":"

Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT instances in the Amazon VPC User Guide.

" }, "ResetNetworkInterfaceAttribute":{ "name":"ResetNetworkInterfaceAttribute", @@ -5967,7 +6029,7 @@ }, "input":{"shape":"RunInstancesRequest"}, "output":{"shape":"Reservation"}, - "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

  • If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.

  • All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.

  • Not all instance types support IPv6 addresses. For more information, see Instance types.

  • If you don't specify a security group ID, we use the default security group. For more information, see Security groups.

  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs.

For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance.

" + "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

  • If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.

  • All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.

  • Not all instance types support IPv6 addresses. For more information, see Instance types.

  • If you don't specify a security group ID, we use the default security group for the VPC. For more information, see Security groups.

  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

RunInstances is subject to both request rate limiting and resource rate limiting. For more information, see Request throttling.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs.

For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance.

" }, "RunScheduledInstances":{ "name":"RunScheduledInstances", @@ -5977,7 +6039,7 @@ }, "input":{"shape":"RunScheduledInstancesRequest"}, "output":{"shape":"RunScheduledInstancesResult"}, - "documentation":"

Launches the specified Scheduled Instances.

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon EC2 User Guide.

" + "documentation":"

Launches the specified Scheduled Instances.

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes.

" }, "SearchLocalGatewayRoutes":{ "name":"SearchLocalGatewayRoutes", @@ -6016,7 +6078,7 @@ "requestUri":"/" }, "input":{"shape":"SendDiagnosticInterruptRequest"}, - "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) (Linux instances) or Send a diagnostic interrupt (for advanced users) (Windows instances).

" + "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) in the Amazon EC2 User Guide.

" }, "StartInstances":{ "name":"StartInstances", @@ -6026,7 +6088,7 @@ }, "input":{"shape":"StartInstancesRequest"}, "output":{"shape":"StartInstancesResult"}, - "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.

For more information, see Stop and start your instance in the Amazon EC2 User Guide.

" + "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.

For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide.

" }, "StartNetworkInsightsAccessScopeAnalysis":{ "name":"StartNetworkInsightsAccessScopeAnalysis", @@ -6066,7 +6128,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"

Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.

" + "documentation":"

Stops an Amazon EBS-backed instance. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.

" }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -6956,7 +7018,7 @@ "locationName":"addressTransferStatus" } }, - "documentation":"

Details on the Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Details on the Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" }, "AddressTransferList":{ "type":"list", @@ -7029,7 +7091,7 @@ }, "NetworkBorderGroup":{ "shape":"String", - "documentation":"

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups.

Use DescribeAvailabilityZones to view the network border groups.

" + "documentation":"

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups.

" }, "CustomerOwnedIpv4Pool":{ "shape":"String", @@ -7098,7 +7160,7 @@ "members":{ "AutoPlacement":{ "shape":"AutoPlacement", - "documentation":"

Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding auto-placement and affinity in the Amazon EC2 User Guide.

Default: on

", + "documentation":"

Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding auto-placement and affinity in the Amazon EC2 User Guide.

Default: off

", "locationName":"autoPlacement" }, "AvailabilityZone":{ @@ -7182,7 +7244,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "Description":{ @@ -7492,7 +7554,7 @@ }, "DestinationPrefixListId":{ "shape":"String", - "documentation":"

The prefix of the Amazon Web Service.

", + "documentation":"

The prefix of the Amazon Web Services service.

", "locationName":"destinationPrefixListId" }, "EgressOnlyInternetGatewayId":{ @@ -7993,7 +8055,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -8454,7 +8516,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -8473,7 +8535,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "locationName":"clientToken" } } @@ -8810,7 +8872,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -9025,7 +9087,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -9673,7 +9735,7 @@ }, "State":{ "shape":"ByoipCidrState", - "documentation":"

The state of the address pool.

", + "documentation":"

The state of the address range.

  • advertised: The address range is being advertised to the internet by Amazon Web Services.

  • deprovisioned: The address range is deprovisioned.

  • failed-deprovision: The request to deprovision the address range was unsuccessful. Ensure that all EIPs from the range have been deallocated and try again.

  • failed-provision: The request to provision the address range was unsuccessful.

  • pending-deprovision: You’ve submitted a request to deprovision an address range and it's pending.

  • pending-provision: You’ve submitted a request to provision an address range and it's pending.

  • provisioned: The address range is provisioned and can be advertised. The range is not currently advertised.

  • provisioned-not-publicly-advertisable: The address range is provisioned and cannot be advertised.

", "locationName":"state" }, "NetworkBorderGroup":{ @@ -10329,7 +10391,7 @@ }, "TotalTargetCapacity":{ "shape":"Integer", - "documentation":"

The total number of capacity units for which the Capacity Reservation Fleet reserves capacity. For more information, see Total target capacity in the Amazon EC2 User Guide.

", + "documentation":"

The total number of capacity units for which the Capacity Reservation Fleet reserves capacity. For more information, see Total target capacity in the Amazon EC2 User Guide.

", "locationName":"totalTargetCapacity" }, "TotalFulfilledCapacity":{ @@ -10359,7 +10421,7 @@ }, "AllocationStrategy":{ "shape":"String", - "documentation":"

The strategy used by the Capacity Reservation Fleet to determine which of the specified instance types to use. For more information, see Allocation strategy in the Amazon EC2 User Guide.

", + "documentation":"

The strategy used by the Capacity Reservation Fleet to determine which of the specified instance types to use. For more information, see Allocation strategy in the Amazon EC2 User Guide.

", "locationName":"allocationStrategy" }, "InstanceTypeSpecifications":{ @@ -10704,7 +10766,7 @@ "documentation":"

The signed authorization message for the prefix and account.

" } }, - "documentation":"

Provides authorization for Amazon to bring a specific IP address range to a specific Amazon Web Services account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Provides authorization for Amazon to bring a specific IP address range to a specific Amazon Web Services account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon EC2 User Guide.

" }, "CidrBlock":{ "type":"structure", @@ -11696,7 +11758,7 @@ "locationName":"udpTimeout" } }, - "documentation":"

A security group connection tracking configuration that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking configuration that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" }, "ConnectionTrackingSpecification":{ "type":"structure", @@ -11717,7 +11779,7 @@ "locationName":"udpStreamTimeout" } }, - "documentation":"

A security group connection tracking specification that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking specification that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" }, "ConnectionTrackingSpecificationRequest":{ "type":"structure", @@ -11735,7 +11797,7 @@ "documentation":"

Timeout (in seconds) for idle UDP flows that have seen traffic only in a single direction or a single request-response transaction. Min: 30 seconds. Max: 60 seconds. Default: 30 seconds.

" } }, - "documentation":"

A security group connection tracking specification request that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking specification request that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" }, "ConnectionTrackingSpecificationResponse":{ "type":"structure", @@ -11756,7 +11818,7 @@ "locationName":"udpTimeout" } }, - "documentation":"

A security group connection tracking specification response that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking specification response that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" }, "ConnectivityType":{ "type":"string", @@ -11862,7 +11924,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

" } } }, @@ -11973,12 +12035,12 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", + "documentation":"

The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", "locationName":"kmsKeyId" }, "PresignedUrl":{ "shape":"CopySnapshotRequestPSU", - "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", + "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", "locationName":"presignedUrl" }, "SourceRegion":{ @@ -12088,6 +12150,57 @@ }, "documentation":"

The CPU options for the instance. Both the core count and threads per core must be specified in the request.

" }, + "CreateCapacityReservationBySplittingRequest":{ + "type":"structure", + "required":[ + "SourceCapacityReservationId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "idempotencyToken":true + }, + "SourceCapacityReservationId":{ + "shape":"CapacityReservationId", + "documentation":"

The ID of the Capacity Reservation from which you want to split the available capacity.

" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"

The number of instances to split from the source Capacity Reservation.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the new Capacity Reservation.

", + "locationName":"TagSpecification" + } + } + }, + "CreateCapacityReservationBySplittingResult":{ + "type":"structure", + "members":{ + "SourceCapacityReservation":{ + "shape":"CapacityReservation", + "documentation":"

Information about the source Capacity Reservation.

", + "locationName":"sourceCapacityReservation" + }, + "DestinationCapacityReservation":{ + "shape":"CapacityReservation", + "documentation":"

Information about the destination Capacity Reservation.

", + "locationName":"destinationCapacityReservation" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"

The number of instances in the new Capacity Reservation. The number of instances in the source Capacity Reservation was reduced by this amount.

", + "locationName":"instanceCount" + } + } + }, "CreateCapacityReservationFleetRequest":{ "type":"structure", "required":[ @@ -12097,7 +12210,7 @@ "members":{ "AllocationStrategy":{ "shape":"String", - "documentation":"

The strategy used by the Capacity Reservation Fleet to determine which of the specified instance types to use. Currently, only the prioritized allocation strategy is supported. For more information, see Allocation strategy in the Amazon EC2 User Guide.

Valid values: prioritized

" + "documentation":"

The strategy used by the Capacity Reservation Fleet to determine which of the specified instance types to use. Currently, only the prioritized allocation strategy is supported. For more information, see Allocation strategy in the Amazon EC2 User Guide.

Valid values: prioritized

" }, "ClientToken":{ "shape":"String", @@ -12115,7 +12228,7 @@ }, "TotalTargetCapacity":{ "shape":"Integer", - "documentation":"

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, together with the instance type weights that you assign to each instance type used by the Fleet determine the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" + "documentation":"

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, together with the instance type weights that you assign to each instance type used by the Fleet, determines the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" }, "EndDate":{ "shape":"MillisecondDateTime", @@ -12299,7 +12412,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -12366,7 +12479,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "TagSpecifications":{ @@ -12447,7 +12560,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -12532,7 +12645,7 @@ "members":{ "BgpAsn":{ "shape":"Integer", - "documentation":"

For devices that support BGP, the customer gateway's BGP ASN.

Default: 65000

" + "documentation":"

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

Default: 65000

Valid values: 1 to 2,147,483,647

" }, "PublicIp":{ "shape":"String", @@ -12557,12 +12670,16 @@ }, "IpAddress":{ "shape":"String", - "documentation":"

IPv4 address for the customer gateway device's outside interface. The address must be static.

" + "documentation":"

IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.

" }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" + }, + "BgpAsnExtended":{ + "shape":"Long", + "documentation":"

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

Valid values: 2,147,483,648 to 4,294,967,295

" } }, "documentation":"

Contains the parameters for CreateCustomerGateway.

" @@ -12662,7 +12779,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

" }, "DryRun":{ "shape":"Boolean", @@ -12864,7 +12981,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" }, "DeliverLogsPermissionArn":{ "shape":"String", @@ -12910,7 +13027,7 @@ }, "MaxAggregationInterval":{ "shape":"Integer", - "documentation":"

The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. The possible values are 60 seconds (1 minute) or 600 seconds (10 minutes). This parameter must be 60 seconds for transit gateway resource types.

When a network interface is attached to a Nitro-based instance, the aggregation interval is always 60 seconds or less, regardless of the value that you specify.

Default: 600

" + "documentation":"

The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. The possible values are 60 seconds (1 minute) or 600 seconds (10 minutes). This parameter must be 60 seconds for transit gateway resource types.

When a network interface is attached to a Nitro-based instance, the aggregation interval is always 60 seconds or less, regardless of the value that you specify.

Default: 600

" }, "DestinationOptions":{ "shape":"DestinationOptionsRequest", @@ -12964,7 +13081,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -13061,7 +13178,7 @@ }, "PreserveClientIp":{ "shape":"Boolean", - "documentation":"

Indicates whether your client's IP address is preserved as the source. The value is true or false.

  • If true, your client's IP address is used when you connect to a resource.

  • If false, the elastic network interface IP address is used when you connect to a resource.

Default: true

" + "documentation":"

Indicates whether the client IP address is preserved as the source. The following are the possible values.

  • true - Use the client IP address as the source.

  • false - Use the network interface IP address as the source.

Default: false

" }, "ClientToken":{ "shape":"String", @@ -13197,6 +13314,40 @@ } } }, + "CreateIpamExternalResourceVerificationTokenRequest":{ + "type":"structure", + "required":["IpamId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "IpamId":{ + "shape":"IpamId", + "documentation":"

The ID of the IPAM that will create the token.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Token tags.

", + "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + } + } + }, + "CreateIpamExternalResourceVerificationTokenResult":{ + "type":"structure", + "members":{ + "IpamExternalResourceVerificationToken":{ + "shape":"IpamExternalResourceVerificationToken", + "documentation":"

The verification token.

", + "locationName":"ipamExternalResourceVerificationToken" + } + } + }, "CreateIpamPoolRequest":{ "type":"structure", "required":[ @@ -13214,7 +13365,7 @@ }, "Locale":{ "shape":"String", - "documentation":"

In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool.

Possible values: Any Amazon Web Services Region, such as us-east-1.

" + "documentation":"

The locale for the pool should be one of the following:

  • An Amazon Web Services Region where you want this IPAM pool to be available for allocations.

  • The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you do not choose a locale, resources in Regions other than the IPAM's home region cannot use CIDRs from this pool.

Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone.

" }, "SourceIpamPoolId":{ "shape":"IpamPoolId", @@ -13260,7 +13411,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "AwsService":{ @@ -13310,12 +13461,16 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "Tier":{ "shape":"IpamTier", "documentation":"

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

" + }, + "EnablePrivateGua":{ + "shape":"Boolean", + "documentation":"

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

" } } }, @@ -13390,7 +13545,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -13516,7 +13671,7 @@ }, "ResolveAlias":{ "shape":"Boolean", - "documentation":"

If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

Default: false

" + "documentation":"

If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

Default: false

" } } }, @@ -13716,7 +13871,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Up to 255 UTF-8 characters in length.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

Constraints: Up to 255 UTF-8 characters in length.

", "idempotencyToken":true } } @@ -13741,7 +13896,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

Constraint: Maximum 64 ASCII characters.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

Constraint: Maximum 64 ASCII characters.

", "idempotencyToken":true }, "DryRun":{ @@ -13879,7 +14034,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -13915,7 +14070,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true }, "TagSpecifications":{ @@ -13987,7 +14142,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true }, "FilterAtSource":{ @@ -14027,7 +14182,7 @@ }, "AwsService":{ "shape":"String", - "documentation":"

The Amazon Web Service. Currently not supported.

" + "documentation":"

The Amazon Web Services service. Currently not supported.

" }, "Permission":{ "shape":"InterfacePermissionType", @@ -14129,7 +14284,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "EnablePrimaryIpv6":{ @@ -14211,6 +14366,10 @@ "shape":"TagSpecificationList", "documentation":"

The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

", "locationName":"TagSpecification" + }, + "NetworkBorderGroup":{ + "shape":"String", + "documentation":"

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" } } }, @@ -14238,7 +14397,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. For more information, see Ensuring idempotency.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -14464,7 +14623,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -14725,7 +14884,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Local Zones locations.

To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN.

" + "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Local Zones.

To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN.

" }, "AvailabilityZoneId":{ "shape":"String", @@ -14826,7 +14985,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -14841,7 +15000,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "locationName":"clientToken" } } @@ -14903,8 +15062,13 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Traffic Mirroring tag specifications.

", + "locationName":"TagSpecification" } } }, @@ -14918,7 +15082,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "locationName":"clientToken" } } @@ -14954,7 +15118,7 @@ }, "VirtualNetworkId":{ "shape":"Integer", - "documentation":"

The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique id is chosen at random.

" + "documentation":"

The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique ID is chosen at random.

" }, "Description":{ "shape":"String", @@ -14971,7 +15135,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -14986,7 +15150,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "locationName":"clientToken" } } @@ -15017,7 +15181,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true }, "GatewayLoadBalancerEndpointId":{ @@ -15036,7 +15200,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "locationName":"clientToken" } } @@ -15620,7 +15784,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -15673,7 +15837,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -15710,7 +15874,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -15823,7 +15987,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -15898,15 +16062,15 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS

  • io1: 100 - 64,000 IOPS

  • io2: 100 - 256,000 IOPS

For io2 volumes, you can achieve up to 256,000 IOPS on instances built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS

  • io1: 100 - 64,000 IOPS

  • io2: 100 - 256,000 IOPS

For io2 volumes, you can achieve up to 256,000 IOPS on instances built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" + "documentation":"

The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Outpost.

" + "documentation":"

The Amazon Resource Name (ARN) of the Outpost on which to create the volume.

If you intend to use a volume with an instance running on an outpost, then you must create the volume on the same outpost as the instance. You can't use a volume created in an Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other way around.

" }, "Size":{ "shape":"Integer", @@ -15932,7 +16096,7 @@ }, "MultiAttachEnabled":{ "shape":"Boolean", - "documentation":"

Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide.

" + "documentation":"

Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide.

" }, "Throughput":{ "shape":"Integer", @@ -15940,7 +16104,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", "idempotencyToken":true } } @@ -15974,7 +16138,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" } } }, @@ -16045,7 +16209,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" }, "PrivateDnsEnabled":{ "shape":"Boolean", @@ -16110,7 +16274,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -16388,7 +16552,7 @@ "members":{ "BgpAsn":{ "shape":"String", - "documentation":"

The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

", + "documentation":"

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

Valid values: 1 to 2,147,483,647

", "locationName":"bgpAsn" }, "CustomerGatewayId":{ @@ -16398,7 +16562,7 @@ }, "IpAddress":{ "shape":"String", - "documentation":"

The IP address of the customer gateway device's outside interface.

", + "documentation":"

IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.

", "locationName":"ipAddress" }, "CertificateArn":{ @@ -16425,6 +16589,11 @@ "shape":"TagList", "documentation":"

Any tags assigned to the customer gateway.

", "locationName":"tagSet" + }, + "BgpAsnExtended":{ + "shape":"String", + "documentation":"

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

Valid values: 2,147,483,648 to 4,294,967,295

", + "locationName":"bgpAsnExtended" } }, "documentation":"

Describes a customer gateway.

" @@ -17016,6 +17185,30 @@ } } }, + "DeleteIpamExternalResourceVerificationTokenRequest":{ + "type":"structure", + "required":["IpamExternalResourceVerificationTokenId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "IpamExternalResourceVerificationTokenId":{ + "shape":"IpamExternalResourceVerificationTokenId", + "documentation":"

The token ID.

" + } + } + }, + "DeleteIpamExternalResourceVerificationTokenResult":{ + "type":"structure", + "members":{ + "IpamExternalResourceVerificationToken":{ + "shape":"IpamExternalResourceVerificationToken", + "documentation":"

The verification token.

", + "locationName":"ipamExternalResourceVerificationToken" + } + } + }, "DeleteIpamPoolRequest":{ "type":"structure", "required":["IpamPoolId"], @@ -17647,6 +17840,10 @@ "PoolId":{ "shape":"Ipv4PoolEc2Id", "documentation":"

The ID of the public IPv4 pool you want to delete.

" + }, + "NetworkBorderGroup":{ + "shape":"String", + "documentation":"

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" } } }, @@ -18257,7 +18454,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -18286,7 +18483,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -18319,7 +18516,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -18348,7 +18545,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -19701,7 +19898,7 @@ "members":{ "DhcpOptionsIds":{ "shape":"DhcpOptionsIdStringList", - "documentation":"

The IDs of one or more DHCP options sets.

Default: Describes all your DHCP options sets.

", + "documentation":"

The IDs of DHCP option sets.

", "locationName":"DhcpOptionsId" }, "Filters":{ @@ -19729,7 +19926,7 @@ "members":{ "DhcpOptions":{ "shape":"DhcpOptionsList", - "documentation":"

Information about one or more DHCP options sets.

", + "documentation":"

Information about the DHCP options sets.

", "locationName":"dhcpOptionsSet" }, "NextToken":{ @@ -21222,7 +21419,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred).

  • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

  • capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none).

  • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation.

  • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group.

  • client-token - The idempotency token you provided when you launched the instance.

  • current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi).

  • dns-name - The public DNS name of the instance.

  • ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O.

  • ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA.

  • enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID.

  • iam-instance-profile.name - The instance profile associated with the instance. Specified as a name.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • ipv6-address - The IPv6 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

  • maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default).

  • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled)

  • metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled).

  • metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled).

  • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled)

  • metadata-options.state - The state of the metadata option changes (pending | applied).

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.association.allocation-id - The allocation ID.

  • network-interface.addresses.association.association-id - The association ID.

  • network-interface.addresses.association.carrier-ip - The carrier IP address.

  • network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.addresses.association.public-dns-name - The public DNS name.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.private-dns-name - The private DNS name.

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.association.carrier-ip - The customer-owned IP address.

  • network-interface.association.customer-owned-ip - The customer-owned IP address.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.public-dns-name - The public DNS name.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.network-card-index - The index of the network card.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

  • network-interface.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address.

  • network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface.

  • network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.outpost-arn - The ARN of the Outpost.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.private-ip-address - The private IPv4 address.

  • network-interface.public-dns-name - The public DNS name.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.tag-key - The key of a tag assigned to the network interface.

  • network-interface.tag-value - The value of a tag assigned to the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The Amazon Web Services account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web).

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records.

  • private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

  • private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name).

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0).

  • usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202).

  • usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z.

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

", + "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred).

  • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

  • capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none).

  • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation.

  • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group.

  • client-token - The idempotency token you provided when you launched the instance.

  • current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi).

  • dns-name - The public DNS name of the instance.

  • ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O.

  • ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA.

  • enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID.

  • iam-instance-profile.name - The instance profile associated with the instance. Specified as a name.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • ipv6-address - The IPv6 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

  • maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default).

  • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled)

  • metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled).

  • metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled).

  • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled)

  • metadata-options.state - The state of the metadata option changes (pending | applied).

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.association.allocation-id - The allocation ID.

  • network-interface.addresses.association.association-id - The association ID.

  • network-interface.addresses.association.carrier-ip - The carrier IP address.

  • network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.addresses.association.public-dns-name - The public DNS name.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.private-dns-name - The private DNS name.

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.association.carrier-ip - The customer-owned IP address.

  • network-interface.association.customer-owned-ip - The customer-owned IP address.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.public-dns-name - The public DNS name.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.network-card-index - The index of the network card.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

  • network-interface.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address.

  • network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface.

  • network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.outpost-arn - The ARN of the Outpost.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.private-ip-address - The private IPv4 address.

  • network-interface.public-dns-name - The public DNS name.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.tag-key - The key of a tag assigned to the network interface.

  • network-interface.tag-value - The value of a tag assigned to the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The Amazon Web Services account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web).

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records.

  • private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

  • private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name).

  • private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0).

  • usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202).

  • usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z.

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

", "locationName":"Filter" }, "InstanceIds":{ @@ -21300,7 +21497,7 @@ "members":{ "InternetGateways":{ "shape":"InternetGatewayList", - "documentation":"

Information about one or more internet gateways.

", + "documentation":"

Information about the internet gateways.

", "locationName":"internetGatewaySet" }, "NextToken":{ @@ -21347,6 +21544,48 @@ } } }, + "DescribeIpamExternalResourceVerificationTokensRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters for the request. For more information about filtering, see Filtering CLI output.

Available filters:

  • ipam-arn

  • ipam-external-resource-verification-token-arn

  • ipam-external-resource-verification-token-id

  • ipam-id

  • ipam-region

  • state

  • status

  • token-name

  • token-value

", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "MaxResults":{ + "shape":"IpamMaxResults", + "documentation":"

The maximum number of tokens to return in one page of results.

" + }, + "IpamExternalResourceVerificationTokenIds":{ + "shape":"ValueStringList", + "documentation":"

Verification token IDs.

", + "locationName":"IpamExternalResourceVerificationTokenId" + } + } + }, + "DescribeIpamExternalResourceVerificationTokensResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + }, + "IpamExternalResourceVerificationTokens":{ + "shape":"IpamExternalResourceVerificationTokenSet", + "documentation":"

Verification tokens.

", + "locationName":"ipamExternalResourceVerificationTokenSet" + } + } + }, "DescribeIpamPoolsRequest":{ "type":"structure", "members":{ @@ -21681,7 +21920,7 @@ }, "ResolveAlias":{ "shape":"Boolean", - "documentation":"

If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageId.

If false, and if a Systems Manager parameter is specified for ImageId, the parameter is displayed in the response for imageId.

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

Default: false

" + "documentation":"

If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageId.

If false, and if a Systems Manager parameter is specified for ImageId, the parameter is displayed in the response for imageId.

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

Default: false

" } } }, @@ -22252,7 +22491,7 @@ }, "NetworkAclIds":{ "shape":"NetworkAclIdStringList", - "documentation":"

The IDs of the network ACLs.

Default: Describes all your network ACLs.

", + "documentation":"

The IDs of the network ACLs.

", "locationName":"NetworkAclId" }, "NextToken":{ @@ -22270,7 +22509,7 @@ "members":{ "NetworkAcls":{ "shape":"NetworkAclList", - "documentation":"

Information about one or more network ACLs.

", + "documentation":"

Information about the network ACLs.

", "locationName":"networkAclSet" }, "NextToken":{ @@ -22545,7 +22784,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • network-interface-permission.network-interface-permission-id - The ID of the permission.

  • network-interface-permission.network-interface-id - The ID of the network interface.

  • network-interface-permission.aws-account-id - The Amazon Web Services account ID.

  • network-interface-permission.aws-service - The Amazon Web Service.

  • network-interface-permission.permission - The type of permission (INSTANCE-ATTACH | EIP-ASSOCIATE).

", + "documentation":"

One or more filters.

  • network-interface-permission.network-interface-permission-id - The ID of the permission.

  • network-interface-permission.network-interface-id - The ID of the network interface.

  • network-interface-permission.aws-account-id - The Amazon Web Services account ID.

  • network-interface-permission.aws-service - The Amazon Web Services service.

  • network-interface-permission.permission - The type of permission (INSTANCE-ATTACH | EIP-ASSOCIATE).

", "locationName":"Filter" }, "NextToken":{ @@ -22585,7 +22824,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The Amazon Web Services account ID of the network interface owner.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", + "documentation":"

One or more filters.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The Amazon Web Services account ID of the network interface owner.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", "locationName":"filter" }, "DryRun":{ @@ -22639,7 +22878,7 @@ }, "GroupNames":{ "shape":"PlacementGroupStringList", - "documentation":"

The names of the placement groups.

Default: Describes all your placement groups, or only those otherwise specified.

", + "documentation":"

The names of the placement groups.

Constraints:

  • You can specify a name only if the placement group is owned by your account.

  • If a placement group is shared with your account, specifying the name results in an error. You must use the GroupId parameter instead.

", "locationName":"groupName" }, "GroupIds":{ @@ -22949,7 +23188,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type that the reservation will cover (for example, m1.small). For more information, see Instance types in the Amazon EC2 User Guide.

" + "documentation":"

The instance type that the reservation will cover (for example, m1.small). For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide.

" }, "MaxDuration":{ "shape":"Long", @@ -23071,7 +23310,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • association.gateway-id - The ID of the gateway involved in the association.

  • association.route-table-association-id - The ID of an association ID for the route table.

  • association.route-table-id - The ID of the route table involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response.

  • owner-id - The ID of the Amazon Web Services account that owns the route table.

  • route-table-id - The ID of the route table.

  • route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table.

  • route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

  • route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service specified in a route in the table.

  • route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table.

  • route.gateway-id - The ID of a gateway specified in a route in the table.

  • route.instance-id - The ID of an instance specified in a route in the table.

  • route.nat-gateway-id - The ID of a NAT gateway.

  • route.transit-gateway-id - The ID of a transit gateway.

  • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

  • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

  • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the route table.

", + "documentation":"

The filters.

  • association.gateway-id - The ID of the gateway involved in the association.

  • association.route-table-association-id - The ID of an association ID for the route table.

  • association.route-table-id - The ID of the route table involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response.

  • owner-id - The ID of the Amazon Web Services account that owns the route table.

  • route-table-id - The ID of the route table.

  • route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table.

  • route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

  • route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services service specified in a route in the table.

  • route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table.

  • route.gateway-id - The ID of a gateway specified in a route in the table.

  • route.instance-id - The ID of an instance specified in a route in the table.

  • route.nat-gateway-id - The ID of a NAT gateway.

  • route.transit-gateway-id - The ID of a transit gateway.

  • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

  • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

  • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the route table.

", "locationName":"Filter" }, "DryRun":{ @@ -23081,7 +23320,7 @@ }, "RouteTableIds":{ "shape":"RouteTableIdStringList", - "documentation":"

The IDs of the route tables.

Default: Describes all your route tables.

", + "documentation":"

The IDs of the route tables.

", "locationName":"RouteTableId" }, "NextToken":{ @@ -23099,7 +23338,7 @@ "members":{ "RouteTables":{ "shape":"RouteTableList", - "documentation":"

Information about one or more route tables.

", + "documentation":"

Information about the route tables.

", "locationName":"routeTableSet" }, "NextToken":{ @@ -23436,7 +23675,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of snapshots to return for this request. This value can be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results are returned. If this parameter is not used, then the request returns all snapshots. You cannot specify this parameter and the snapshot IDs parameter in the same request. For more information, see Pagination.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "NextToken":{ "shape":"String", @@ -23474,7 +23713,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The token to include in another request to return the next page of snapshots. This value is null when there are no more snapshots to return.

", + "documentation":"

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", "locationName":"nextToken" } } @@ -23677,7 +23916,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • availability-zone-group - The Availability Zone group.

  • create-time - The time stamp when the Spot Instance request was created.

  • fault-code - The fault code related to the request.

  • fault-message - The fault message related to the request.

  • instance-id - The ID of the instance that fulfilled the request.

  • launch-group - The Spot Instance launch group.

  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.

  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).

  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.

  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.

  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

  • launch.group-id - The ID of the security group for the instance.

  • launch.group-name - The name of the security group for the instance.

  • launch.image-id - The ID of the AMI.

  • launch.instance-type - The type of instance (for example, m3.medium).

  • launch.kernel-id - The kernel ID.

  • launch.key-name - The name of the key pair the instance launched with.

  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.

  • launch.ramdisk-id - The RAM disk ID.

  • launched-availability-zone - The Availability Zone in which the request is launched.

  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

  • network-interface.description - A description of the network interface.

  • network-interface.device-index - The index of the device for the network interface attachment on the instance.

  • network-interface.group-id - The ID of the security group associated with the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.private-ip-address - The primary private IP address of the network interface.

  • network-interface.subnet-id - The ID of the subnet for the instance.

  • product-description - The product description associated with the instance (Linux/UNIX | Windows).

  • spot-instance-request-id - The Spot Instance request ID.

  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.

  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide for Linux Instances.

  • status-code - The short code describing the most recent evaluation of your Spot Instance request.

  • status-message - The message explaining the status of the Spot Instance request.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • type - The type of Spot Instance request (one-time | persistent).

  • valid-from - The start date of the request.

  • valid-until - The end date of the request.

", + "documentation":"

The filters.

  • availability-zone-group - The Availability Zone group.

  • create-time - The time stamp when the Spot Instance request was created.

  • fault-code - The fault code related to the request.

  • fault-message - The fault message related to the request.

  • instance-id - The ID of the instance that fulfilled the request.

  • launch-group - The Spot Instance launch group.

  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.

  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).

  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.

  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.

  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

  • launch.group-id - The ID of the security group for the instance.

  • launch.group-name - The name of the security group for the instance.

  • launch.image-id - The ID of the AMI.

  • launch.instance-type - The type of instance (for example, m3.medium).

  • launch.kernel-id - The kernel ID.

  • launch.key-name - The name of the key pair the instance launched with.

  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.

  • launch.ramdisk-id - The RAM disk ID.

  • launched-availability-zone - The Availability Zone in which the request is launched.

  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

  • network-interface.description - A description of the network interface.

  • network-interface.device-index - The index of the device for the network interface attachment on the instance.

  • network-interface.group-id - The ID of the security group associated with the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.private-ip-address - The primary private IP address of the network interface.

  • network-interface.subnet-id - The ID of the subnet for the instance.

  • product-description - The product description associated with the instance (Linux/UNIX | Windows).

  • spot-instance-request-id - The Spot Instance request ID.

  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.

  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide.

  • status-code - The short code describing the most recent evaluation of your Spot Instance request.

  • status-message - The message explaining the status of the Spot Instance request.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • type - The type of Spot Instance request (one-time | persistent).

  • valid-from - The start date of the request.

  • valid-until - The end date of the request.

", "locationName":"Filter" }, "DryRun":{ @@ -23916,7 +24155,7 @@ "members":{ "Subnets":{ "shape":"SubnetList", - "documentation":"

Information about one or more subnets.

", + "documentation":"

Information about the subnets.

", "locationName":"subnetSet" }, "NextToken":{ @@ -23966,6 +24205,52 @@ } } }, + "DescribeTrafficMirrorFilterRulesRequest":{ + "type":"structure", + "members":{ + "TrafficMirrorFilterRuleIds":{ + "shape":"TrafficMirrorFilterRuleIdList", + "documentation":"

Traffic filter rule IDs.

", + "locationName":"TrafficMirrorFilterRuleId" + }, + "TrafficMirrorFilterId":{ + "shape":"TrafficMirrorFilterId", + "documentation":"

Traffic filter ID.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Traffic mirror filters.

  • traffic-mirror-filter-rule-id: The ID of the Traffic Mirror rule.

  • traffic-mirror-filter-id: The ID of the filter that this rule is associated with.

  • rule-number: The number of the Traffic Mirror rule.

  • rule-action: The action taken on the filtered traffic. Possible actions are accept and reject.

  • traffic-direction: The traffic direction. Possible directions are ingress and egress.

  • protocol: The protocol, for example UDP, assigned to the Traffic Mirror rule.

  • source-cidr-block: The source CIDR block assigned to the Traffic Mirror rule.

  • destination-cidr-block: The destination CIDR block assigned to the Traffic Mirror rule.

  • description: The description of the Traffic Mirror rule.

", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"TrafficMirroringMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + } + } + }, + "DescribeTrafficMirrorFilterRulesResult":{ + "type":"structure", + "members":{ + "TrafficMirrorFilterRules":{ + "shape":"TrafficMirrorFilterRuleSet", + "documentation":"

Traffic mirror rules.

", + "locationName":"trafficMirrorFilterRuleSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeTrafficMirrorFiltersRequest":{ "type":"structure", "members":{ @@ -24848,7 +25133,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. This value can be between 5 and 1,000; if the value is larger than 1,000, only 1,000 results are returned. If this parameter is not used, then all items are returned. You cannot specify this parameter and the volume IDs parameter in the same request. For more information, see Pagination.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "NextToken":{ "shape":"String", @@ -24900,7 +25185,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The token returned by a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + "documentation":"

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" }, "MaxResults":{ "shape":"Integer", @@ -24918,7 +25203,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The token to include in another request to get the next page of items. This value is null if there are no more items to return.

", + "documentation":"

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", "locationName":"nextToken" } } @@ -24933,7 +25218,7 @@ }, "VolumeIds":{ "shape":"VolumeIdStringList", - "documentation":"

The volume IDs.

", + "documentation":"

The volume IDs. If not specified, then all volumes are included in the response.

", "locationName":"VolumeId" }, "DryRun":{ @@ -24943,12 +25228,12 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of volumes to return for this request. This value can be between 5 and 500; if you specify a value larger than 500, only 500 items are returned. If this parameter is not used, then all items are returned. You cannot specify this parameter and the volume IDs parameter in the same request. For more information, see Pagination.

", + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

", "locationName":"maxResults" }, "NextToken":{ "shape":"String", - "documentation":"

The token returned from a previous paginated request. Pagination continues from the end of the items returned from the previous request.

", + "documentation":"

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "locationName":"nextToken" } } @@ -25330,7 +25615,7 @@ "members":{ "VpcEndpoints":{ "shape":"VpcEndpointSet", - "documentation":"

Information about the endpoints.

", + "documentation":"

Information about the VPC endpoints.

", "locationName":"vpcEndpointSet" }, "NextToken":{ @@ -25403,7 +25688,7 @@ }, "VpcIds":{ "shape":"VpcIdStringList", - "documentation":"

The IDs of the VPCs.

Default: Describes all your VPCs.

", + "documentation":"

The IDs of the VPCs.

", "locationName":"VpcId" }, "DryRun":{ @@ -25426,7 +25711,7 @@ "members":{ "Vpcs":{ "shape":"VpcList", - "documentation":"

Information about one or more VPCs.

", + "documentation":"

Information about the VPCs.

", "locationName":"vpcSet" }, "NextToken":{ @@ -25643,7 +25928,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -26788,7 +27073,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -26807,7 +27092,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "locationName":"clientToken" } } @@ -27472,7 +27757,7 @@ "locationName":"elasticGpuAssociationTime" } }, - "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the association between an instance and an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances.

Describes the association between an instance and an Elastic Graphics accelerator.

" }, "ElasticGpuAssociationList":{ "type":"list", @@ -27490,7 +27775,7 @@ "locationName":"status" } }, - "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the status of an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances.

Describes the status of an Elastic Graphics accelerator.

" }, "ElasticGpuId":{"type":"string"}, "ElasticGpuIdSet":{ @@ -27513,10 +27798,10 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The type of Elastic Graphics accelerator. For more information about the values to specify for Type, see Elastic Graphics Basics, specifically the Elastic Graphics accelerator column, in the Amazon Elastic Compute Cloud User Guide for Windows Instances.

" + "documentation":"

The type of Elastic Graphics accelerator.

" } }, - "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

A specification for an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances.

A specification for an Elastic Graphics accelerator.

" }, "ElasticGpuSpecificationList":{ "type":"list", @@ -27600,7 +27885,7 @@ "locationName":"tagSet" } }, - "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances.

Describes an Elastic Graphics accelerator.

" }, "ElasticInferenceAccelerator":{ "type":"structure", @@ -29515,7 +29800,7 @@ }, "FulfilledCapacity":{ "shape":"Double", - "documentation":"

The number of capacity units fulfilled by the Capacity Reservation. For more information, see Total target capacity in the Amazon EC2 User Guide.

", + "documentation":"

The number of capacity units fulfilled by the Capacity Reservation. For more information, see Total target capacity in the Amazon EC2 User Guide.

", "locationName":"fulfilledCapacity" }, "EbsOptimized":{ @@ -29530,12 +29815,12 @@ }, "Weight":{ "shape":"DoubleWithConstraints", - "documentation":"

The weight of the instance type in the Capacity Reservation Fleet. For more information, see Instance type weight in the Amazon EC2 User Guide.

", + "documentation":"

The weight of the instance type in the Capacity Reservation Fleet. For more information, see Instance type weight in the Amazon EC2 User Guide.

", "locationName":"weight" }, "Priority":{ "shape":"IntegerWithConstraints", - "documentation":"

The priority of the instance type in the Capacity Reservation Fleet. For more information, see Instance type priority in the Amazon EC2 User Guide.

", + "documentation":"

The priority of the instance type in the Capacity Reservation Fleet. For more information, see Instance type priority in the Amazon EC2 User Guide.

", "locationName":"priority" } }, @@ -29762,7 +30047,7 @@ }, "WeightedCapacity":{ "shape":"Double", - "documentation":"

The number of units provided by the specified instance type.

When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

", + "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

", "locationName":"weightedCapacity" }, "Priority":{ @@ -29782,7 +30067,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

", + "documentation":"

The ID of the AMI in the format ami-17characters00000.

Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch.

To reference a public parameter:

  • resolve:ssm:public-parameter

To reference a parameter stored in the same account:

  • resolve:ssm:parameter-name

  • resolve:ssm:parameter-name:version-number

  • resolve:ssm:parameter-name:label

To reference a parameter shared from another Amazon Web Services account:

  • resolve:ssm:parameter-ARN

  • resolve:ssm:parameter-ARN:version-number

  • resolve:ssm:parameter-ARN:label

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

", "locationName":"imageId" } }, @@ -29823,7 +30108,7 @@ }, "WeightedCapacity":{ "shape":"Double", - "documentation":"

The number of units provided by the specified instance type.

When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

" + "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

" }, "Priority":{ "shape":"Double", @@ -29839,7 +30124,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

" + "documentation":"

The ID of the AMI in the format ami-17characters00000.

Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch.

To reference a public parameter:

  • resolve:ssm:public-parameter

To reference a parameter stored in the same account:

  • resolve:ssm:parameter-name

  • resolve:ssm:parameter-name:version-number

  • resolve:ssm:parameter-name:label

To reference a parameter shared from another Amazon Web Services account:

  • resolve:ssm:parameter-ARN

  • resolve:ssm:parameter-ARN:version-number

  • resolve:ssm:parameter-ARN:label

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

" } }, "documentation":"

Describes overrides for a launch template.

" @@ -30051,7 +30336,7 @@ }, "MaxAggregationInterval":{ "shape":"Integer", - "documentation":"

The maximum interval of time, in seconds, during which a flow of packets is captured and aggregated into a flow log record.

When a network interface is attached to a Nitro-based instance, the aggregation interval is always 60 seconds (1 minute) or less, regardless of the specified value.

Valid Values: 60 | 600

", + "documentation":"

The maximum interval of time, in seconds, during which a flow of packets is captured and aggregated into a flow log record.

When a network interface is attached to a Nitro-based instance, the aggregation interval is always 60 seconds (1 minute) or less, regardless of the specified value.

Valid Values: 60 | 600

", "locationName":"maxAggregationInterval" }, "DestinationOptions":{ @@ -32441,7 +32726,7 @@ "locationName":"configured" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

" }, "HibernationOptionsRequest":{ "type":"structure", @@ -32451,7 +32736,7 @@ "documentation":"

Set to true to enable your instance for hibernation.

For Spot Instances, if you set Configured to true, either omit the InstanceInterruptionBehavior parameter (for SpotMarketOptions ), or set it to hibernate. When Configured is true:

  • If you omit InstanceInterruptionBehavior, it defaults to hibernate.

  • If you set InstanceInterruptionBehavior to a value other than hibernate, you'll get an error.

Default: false

" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

" }, "HistoryRecord":{ "type":"structure", @@ -32837,6 +33122,7 @@ "HostTenancy":{ "type":"string", "enum":[ + "default", "dedicated", "host" ] @@ -35517,7 +35803,7 @@ }, "ConnectionTrackingConfiguration":{ "shape":"ConnectionTrackingSpecificationResponse", - "documentation":"

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

", "locationName":"connectionTrackingConfiguration" } }, @@ -35707,7 +35993,7 @@ }, "ConnectionTrackingSpecification":{ "shape":"ConnectionTrackingSpecificationRequest", - "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } }, "documentation":"

Describes a network interface.

" @@ -35872,7 +36158,7 @@ }, "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ "shape":"Integer", - "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

", + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

", "locationName":"maxSpotPriceAsPercentageOfOptimalOnDemandPrice" } }, @@ -35987,7 +36273,7 @@ }, "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ "shape":"Integer", - "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

" + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

" } }, "documentation":"

The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.

You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:

  • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

  • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

If you specify InstanceRequirements, you can't specify InstanceType.

Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements.

For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.

" @@ -37120,7 +37406,31 @@ "g6.24xlarge", "g6.48xlarge", "gr6.4xlarge", - "gr6.8xlarge" + "gr6.8xlarge", + "c7i-flex.large", + "c7i-flex.xlarge", + "c7i-flex.2xlarge", + "c7i-flex.4xlarge", + "c7i-flex.8xlarge", + "u7i-12tb.224xlarge", + "u7in-16tb.224xlarge", + "u7in-24tb.224xlarge", + "u7in-32tb.224xlarge", + "u7ib-12tb.224xlarge", + "c7gn.metal", + "r8g.medium", + "r8g.large", + "r8g.xlarge", + "r8g.2xlarge", + "r8g.4xlarge", + "r8g.8xlarge", + "r8g.12xlarge", + "r8g.16xlarge", + "r8g.24xlarge", + "r8g.48xlarge", + "r8g.metal-24xl", + "r8g.metal-48xl", + "mac2-m1ultra.metal" ] }, "InstanceTypeHypervisor":{ @@ -37586,6 +37896,14 @@ "locationName":"item" } }, + "IpSource":{ + "type":"string", + "enum":[ + "amazon", + "byoip", + "none" + ] + }, "Ipam":{ "type":"structure", "members":{ @@ -37668,6 +37986,11 @@ "shape":"IpamTier", "documentation":"

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

", "locationName":"tier" + }, + "EnablePrivateGua":{ + "shape":"Boolean", + "documentation":"

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

", + "locationName":"enablePrivateGua" } }, "documentation":"

IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.

" @@ -37908,7 +38231,7 @@ }, "NetworkBorderGroup":{ "shape":"String", - "documentation":"

The network border group that the resource that the IP address is assigned to is in.

", + "documentation":"

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

", "locationName":"networkBorderGroup" }, "SecurityGroups":{ @@ -37959,6 +38282,11 @@ "documentation":"

The resource CIDR.

", "locationName":"resourceCidr" }, + "IpSource":{ + "shape":"IpamResourceCidrIpSource", + "documentation":"

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "locationName":"ipSource" + }, "ResourceType":{ "shape":"IpamResourceType", "documentation":"

The resource type.

", @@ -37979,10 +38307,20 @@ "documentation":"

The VPC ID.

", "locationName":"vpcId" }, + "NetworkInterfaceAttachmentStatus":{ + "shape":"IpamNetworkInterfaceAttachmentStatus", + "documentation":"

For elastic network interfaces, this is the status of whether or not the elastic network interface is attached.

", + "locationName":"networkInterfaceAttachmentStatus" + }, "SampleTime":{ "shape":"MillisecondDateTime", "documentation":"

The last successful resource discovery time.

", "locationName":"sampleTime" + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

The Availability Zone ID.

", + "locationName":"availabilityZoneId" } }, "documentation":"

An IPAM discovered resource CIDR. A discovered resource is a resource CIDR monitored under a resource discovery. The following resources can be discovered: VPCs, Public IPv4 pools, VPC subnets, and Elastic IP addresses. The discovered resource CIDR is the IP address range in CIDR notation that is associated with the resource.

" @@ -38018,6 +38356,86 @@ }, "documentation":"

The discovery failure reason.

" }, + "IpamExternalResourceVerificationToken":{ + "type":"structure", + "members":{ + "IpamExternalResourceVerificationTokenId":{ + "shape":"IpamExternalResourceVerificationTokenId", + "documentation":"

The ID of the token.

", + "locationName":"ipamExternalResourceVerificationTokenId" + }, + "IpamExternalResourceVerificationTokenArn":{ + "shape":"ResourceArn", + "documentation":"

Token ARN.

", + "locationName":"ipamExternalResourceVerificationTokenArn" + }, + "IpamId":{ + "shape":"IpamId", + "documentation":"

The ID of the IPAM that created the token.

", + "locationName":"ipamId" + }, + "IpamArn":{ + "shape":"ResourceArn", + "documentation":"

ARN of the IPAM that created the token.

", + "locationName":"ipamArn" + }, + "IpamRegion":{ + "shape":"String", + "documentation":"

Region of the IPAM that created the token.

", + "locationName":"ipamRegion" + }, + "TokenValue":{ + "shape":"String", + "documentation":"

Token value.

", + "locationName":"tokenValue" + }, + "TokenName":{ + "shape":"String", + "documentation":"

Token name.

", + "locationName":"tokenName" + }, + "NotAfter":{ + "shape":"MillisecondDateTime", + "documentation":"

Token expiration.

", + "locationName":"notAfter" + }, + "Status":{ + "shape":"TokenState", + "documentation":"

Token status.

", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Token tags.

", + "locationName":"tagSet" + }, + "State":{ + "shape":"IpamExternalResourceVerificationTokenState", + "documentation":"

Token state.

", + "locationName":"state" + } + }, + "documentation":"

A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).

" + }, + "IpamExternalResourceVerificationTokenId":{"type":"string"}, + "IpamExternalResourceVerificationTokenSet":{ + "type":"list", + "member":{ + "shape":"IpamExternalResourceVerificationToken", + "locationName":"item" + } + }, + "IpamExternalResourceVerificationTokenState":{ + "type":"string", + "enum":[ + "create-in-progress", + "create-complete", + "create-failed", + "delete-in-progress", + "delete-complete", + "delete-failed" + ] + }, "IpamId":{"type":"string"}, "IpamManagementState":{ "type":"string", @@ -38037,6 +38455,13 @@ "max":128, "min":0 }, + "IpamNetworkInterfaceAttachmentStatus":{ + "type":"string", + "enum":[ + "available", + "in-use" + ] + }, "IpamOperatingRegion":{ "type":"structure", "members":{ @@ -38108,7 +38533,7 @@ }, "Locale":{ "shape":"String", - "documentation":"

The locale of the IPAM pool. In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.

", + "documentation":"

The locale of the IPAM pool.

The locale for the pool should be one of the following:

  • An Amazon Web Services Region where you want this IPAM pool to be available for allocations.

  • The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.

", "locationName":"locale" }, "PoolDepth":{ @@ -38178,7 +38603,7 @@ }, "PublicIpSource":{ "shape":"IpamPoolPublicIpSource", - "documentation":"

The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.

", + "documentation":"

The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.

", "locationName":"publicIpSource" }, "SourceResource":{ @@ -38590,10 +39015,23 @@ "shape":"String", "documentation":"

The ID of a VPC.

", "locationName":"vpcId" + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

The Availability Zone ID.

", + "locationName":"availabilityZoneId" } }, "documentation":"

The CIDR for an IPAM resource.

" }, + "IpamResourceCidrIpSource":{ + "type":"string", + "enum":[ + "amazon", + "byoip", + "none" + ] + }, "IpamResourceCidrSet":{ "type":"list", "member":{ @@ -38939,7 +39377,7 @@ "members":{ "Ipv4Prefix":{ "shape":"String", - "documentation":"

The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The IPv4 prefix. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

", "locationName":"ipv4Prefix" } }, @@ -38950,7 +39388,7 @@ "members":{ "Ipv4Prefix":{ "shape":"String", - "documentation":"

The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The IPv4 prefix. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

" } }, "documentation":"

Describes the IPv4 prefix option for a network interface.

" @@ -38974,6 +39412,13 @@ } }, "Ipv6Address":{"type":"string"}, + "Ipv6AddressAttribute":{ + "type":"string", + "enum":[ + "public", + "private" + ] + }, "Ipv6AddressList":{ "type":"list", "member":{ @@ -39982,7 +40427,7 @@ "locationName":"instanceMetadataTags" } }, - "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.

" }, "LaunchTemplateInstanceMetadataOptionsRequest":{ "type":"structure", @@ -40008,7 +40453,7 @@ "documentation":"

Set to enabled to allow access to instance tags from the instance metadata. Set to disabled to turn off access to instance tags from the instance metadata. For more information, see Work with instance tags using the instance metadata.

Default: disabled

" } }, - "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.

" }, "LaunchTemplateInstanceMetadataOptionsState":{ "type":"string", @@ -40141,7 +40586,7 @@ }, "ConnectionTrackingSpecification":{ "shape":"ConnectionTrackingSpecification", - "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Idle connection tracking timeout in the Amazon EC2 User Guide.

", "locationName":"connectionTrackingSpecification" } }, @@ -40184,7 +40629,7 @@ }, "InterfaceType":{ "shape":"String", - "documentation":"

The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide.

If you are not creating an EFA, specify interface or omit this parameter.

Valid values: interface | efa

" + "documentation":"

The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon EC2 User Guide.

If you are not creating an EFA, specify interface or omit this parameter.

Valid values: interface | efa

" }, "Ipv6AddressCount":{ "shape":"Integer", @@ -40246,7 +40691,7 @@ }, "ConnectionTrackingSpecification":{ "shape":"ConnectionTrackingSpecificationRequest", - "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Idle connection tracking timeout in the Amazon EC2 User Guide.

" } }, "documentation":"

The parameters for a network interface.

" @@ -40331,7 +40776,7 @@ }, "WeightedCapacity":{ "shape":"Double", - "documentation":"

The number of units provided by the specified instance type.

When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

", + "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

When specifying weights, the price used in the lowestPrice and priceCapacityOptimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

", "locationName":"weightedCapacity" }, "Priority":{ @@ -41900,7 +42345,7 @@ }, "OptInStatus":{ "shape":"ModifyAvailabilityZoneOptInStatus", - "documentation":"

Indicates whether you are opted in to the Local Zone group or Wavelength Zone group. The only valid value is opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group.

" + "documentation":"

Indicates whether to opt in to the zone group. The only valid value is opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group.

" }, "DryRun":{ "shape":"Boolean", @@ -41935,7 +42380,7 @@ }, "TotalTargetCapacity":{ "shape":"Integer", - "documentation":"

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, together with the instance type weights that you assign to each instance type used by the Fleet determine the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" + "documentation":"

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, together with the instance type weights that you assign to each instance type used by the Fleet determine the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" }, "EndDate":{ "shape":"MillisecondDateTime", @@ -41992,6 +42437,10 @@ "AdditionalInfo":{ "shape":"String", "documentation":"

Reserved for future use.

" + }, + "InstanceMatchCriteria":{ + "shape":"InstanceMatchCriteria", + "documentation":"

The matching criteria (instance eligibility) that you want to use in the modified Capacity Reservation. If you change the instance eligibility of an existing Capacity Reservation from targeted to open, any running instances that match the attributes of the Capacity Reservation, have the CapacityReservationPreference set to open, and are not yet running in the Capacity Reservation, will automatically use the modified Capacity Reservation.

To modify the instance eligibility, the Capacity Reservation must be completely idle (zero usage).

" } } }, @@ -42115,7 +42564,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric KMS keys.

" + "documentation":"

The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric KMS keys.

" }, "DryRun":{ "shape":"Boolean", @@ -42249,7 +42698,7 @@ }, "HostRecovery":{ "shape":"HostRecovery", - "documentation":"

Indicates whether to enable or disable host recovery for the Dedicated Host. For more information, see Host recovery in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether to enable or disable host recovery for the Dedicated Host. For more information, see Host recovery in the Amazon EC2 User Guide.

" }, "InstanceType":{ "shape":"String", @@ -42261,7 +42710,7 @@ }, "HostMaintenance":{ "shape":"HostMaintenance", - "documentation":"

Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide.

" } } }, @@ -42462,7 +42911,7 @@ }, "UserData":{ "shape":"BlobAttributeValue", - "documentation":"

Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.

", + "documentation":"

Changes the instance's user data to the specified value. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.

", "locationName":"userData" }, "Value":{ @@ -42472,7 +42921,7 @@ }, "DisableApiStop":{ "shape":"AttributeBooleanValue", - "documentation":"

Indicates whether an instance is enabled for stop protection. For more information, see Stop Protection.

" + "documentation":"

Indicates whether an instance is enabled for stop protection. For more information, see Enable stop protection for your instance.

" } } }, @@ -42870,6 +43319,10 @@ "Tier":{ "shape":"IpamTier", "documentation":"

IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC pricing > IPAM tab.

" + }, + "EnablePrivateGua":{ + "shape":"Boolean", + "documentation":"

Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.

" } } }, @@ -43539,7 +43992,7 @@ "members":{ "TrafficMirrorFilterRule":{ "shape":"TrafficMirrorFilterRule", - "documentation":"

Modifies a Traffic Mirror rule.

", + "documentation":"

Tags are not returned for ModifyTrafficMirrorFilterRule.

A Traffic Mirror rule.

", "locationName":"trafficMirrorFilterRule" } } @@ -43827,7 +44280,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -43886,7 +44339,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -43930,7 +44383,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -43981,7 +44434,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -44021,7 +44474,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -44054,7 +44507,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true } } @@ -44139,7 +44592,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "SseSpecification":{ @@ -44199,7 +44652,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS

  • io1: 100 - 64,000 IOPS

  • io2: 100 - 256,000 IOPS

For io2 volumes, you can achieve up to 256,000 IOPS on instances built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

Default: The existing value is retained if you keep the same volume type. If you change the volume type to io1, io2, or gp3, the default is 3,000.

" + "documentation":"

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS

  • io1: 100 - 64,000 IOPS

  • io2: 100 - 256,000 IOPS

For io2 volumes, you can achieve up to 256,000 IOPS on instances built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

Default: The existing value is retained if you keep the same volume type. If you change the volume type to io1, io2, or gp3, the default is 3,000.

" }, "Throughput":{ "shape":"Integer", @@ -44207,7 +44660,7 @@ }, "MultiAttachEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Nitro-based instances in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide.

" + "documentation":"

Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Nitro-based instances in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide.

" } } }, @@ -44911,6 +45364,57 @@ } } }, + "MoveCapacityReservationInstancesRequest":{ + "type":"structure", + "required":[ + "SourceCapacityReservationId", + "DestinationCapacityReservationId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "idempotencyToken":true + }, + "SourceCapacityReservationId":{ + "shape":"CapacityReservationId", + "documentation":"

The ID of the Capacity Reservation from which you want to move capacity.

" + }, + "DestinationCapacityReservationId":{ + "shape":"CapacityReservationId", + "documentation":"

The ID of the Capacity Reservation that you want to move capacity into.

" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"

The number of instances that you want to move from the source Capacity Reservation.

" + } + } + }, + "MoveCapacityReservationInstancesResult":{ + "type":"structure", + "members":{ + "SourceCapacityReservation":{ + "shape":"CapacityReservation", + "documentation":"

Information about the source Capacity Reservation.

", + "locationName":"sourceCapacityReservation" + }, + "DestinationCapacityReservation":{ + "shape":"CapacityReservation", + "documentation":"

Information about the destination Capacity Reservation.

", + "locationName":"destinationCapacityReservation" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"

The number of instances that were moved from the source Capacity Reservation to the destination Capacity Reservation.

", + "locationName":"instanceCount" + } + } + }, "MoveStatus":{ "type":"string", "enum":[ @@ -44983,7 +45487,7 @@ }, "ProvisionedBandwidth":{ "shape":"ProvisionedBandwidth", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact Amazon Web Services Support.

", "locationName":"provisionedBandwidth" }, "State":{ @@ -45109,7 +45613,7 @@ "members":{ "Associations":{ "shape":"NetworkAclAssociationList", - "documentation":"

Any associations between the network ACL and one or more subnets

", + "documentation":"

Any associations between the network ACL and your subnets

", "locationName":"associationSet" }, "Entries":{ @@ -45743,7 +46247,7 @@ }, "ConnectionTrackingConfiguration":{ "shape":"ConnectionTrackingConfiguration", - "documentation":"

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

", "locationName":"connectionTrackingConfiguration" }, "Description":{ @@ -46086,7 +46590,7 @@ }, "AwsService":{ "shape":"String", - "documentation":"

The Amazon Web Service.

", + "documentation":"

The Amazon Web Services service.

", "locationName":"awsService" }, "Permission":{ @@ -46455,12 +46959,12 @@ }, "MinTargetCapacity":{ "shape":"Integer", - "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

", + "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't reached, no instances are launched.

Constraints: Maximum value of 1000. Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

", "locationName":"minTargetCapacity" }, "MaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", + "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

", "locationName":"maxTotalPrice" } }, @@ -46487,11 +46991,11 @@ }, "MinTargetCapacity":{ "shape":"Integer", - "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

" + "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't reached, no instances are launched.

Constraints: Maximum value of 1000. Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

" }, "MaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

" + "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

" } }, "documentation":"

Describes the configuration of On-Demand Instances in an EC2 Fleet.

" @@ -47397,7 +47901,7 @@ "members":{ "Cidrs":{ "shape":"ValueStringList", - "documentation":"

The IP address range of the Amazon Web Service.

", + "documentation":"

The IP address range of the Amazon Web Services service.

", "locationName":"cidrSet" }, "PrefixListId":{ @@ -48014,7 +48518,7 @@ }, "CidrAuthorizationContext":{ "shape":"IpamCidrAuthorizationContext", - "documentation":"

A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option applies to public pools only.

" + "documentation":"

A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 and IPv6 pools in the public scope.

" }, "NetmaskLength":{ "shape":"Integer", @@ -48022,8 +48526,16 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "idempotencyToken":true + }, + "VerificationMethod":{ + "shape":"VerificationMethod", + "documentation":"

The method for verifying control of a public IP address range. Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools in the public scope.

" + }, + "IpamExternalResourceVerificationTokenId":{ + "shape":"IpamExternalResourceVerificationTokenId", + "documentation":"

Verification token ID. This option only applies to IPv4 and IPv6 pools in the public scope.

" } } }, @@ -48059,7 +48571,11 @@ }, "NetmaskLength":{ "shape":"Integer", - "documentation":"

The netmask length of the CIDR you would like to allocate to the public IPv4 pool.

" + "documentation":"

The netmask length of the CIDR you would like to allocate to the public IPv4 pool. The least specific netmask length you can define is 24.

" + }, + "NetworkBorderGroup":{ + "shape":"String", + "documentation":"

The Availability Zone (AZ) or Local Zone (LZ) network border group that contains the resource to which the IP address is assigned. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" } } }, @@ -48083,31 +48599,31 @@ "members":{ "ProvisionTime":{ "shape":"DateTime", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved.

", "locationName":"provisionTime" }, "Provisioned":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved.

", "locationName":"provisioned" }, "RequestTime":{ "shape":"DateTime", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved.

", "locationName":"requestTime" }, "Requested":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved.

", "locationName":"requested" }, "Status":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved.

", "locationName":"status" } }, - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

" + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact Amazon Web Services Support.

" }, "PtrUpdateStatus":{ "type":"structure", @@ -48435,7 +48951,7 @@ "members":{ "ReservedInstancesId":{ "shape":"String", - "documentation":"

The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing pricing tiers in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing pricing tiers in the Amazon EC2 User Guide.

", "locationName":"reservedInstancesId" } }, @@ -49616,7 +50132,7 @@ "members":{ "KernelId":{ "shape":"KernelId", - "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

" }, "EbsOptimized":{ "shape":"Boolean", @@ -49638,11 +50154,11 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.

Valid formats:

  • ami-17characters00000

  • resolve:ssm:parameter-name

  • resolve:ssm:parameter-name:version-number

  • resolve:ssm:parameter-name:label

  • resolve:ssm:public-parameter

Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID.

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the AMI in the format ami-17characters00000.

Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch.

To reference a public parameter:

  • resolve:ssm:public-parameter

To reference a parameter stored in the same account:

  • resolve:ssm:parameter-name

  • resolve:ssm:parameter-name:version-number

  • resolve:ssm:parameter-name:label

To reference a parameter shared from another Amazon Web Services account:

  • resolve:ssm:parameter-ARN

  • resolve:ssm:parameter-ARN:version-number

  • resolve:ssm:parameter-ARN:label

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

If the launch template will be used for an EC2 Fleet or Spot Fleet, note the following:

  • Only EC2 Fleets of type instant support specifying a Systems Manager parameter.

  • For EC2 Fleets of type maintain or request, or for Spot Fleets, you must specify the AMI ID.

" }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide.

If you specify InstanceType, you can't specify InstanceRequirements.

" + "documentation":"

The instance type. For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide.

If you specify InstanceType, you can't specify InstanceRequirements.

" }, "KeyName":{ "shape":"KeyPairName", @@ -49658,7 +50174,7 @@ }, "RamDiskId":{ "shape":"RamdiskId", - "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

" }, "DisableApiTermination":{ "shape":"Boolean", @@ -49670,7 +50186,7 @@ }, "UserData":{ "shape":"SensitiveUserData", - "documentation":"

The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see Run commands on your Linux instance at launch (Linux) or Work with instance user data (Windows) in the Amazon Elastic Compute Cloud User Guide.

If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.

" + "documentation":"

The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide.

If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.

" }, "TagSpecifications":{ "shape":"LaunchTemplateTagSpecificationRequestList", @@ -49707,7 +50223,7 @@ }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptionsRequest", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide.

" }, "CapacityReservationSpecification":{ "shape":"LaunchTemplateCapacityReservationSpecificationRequest", @@ -49720,15 +50236,15 @@ }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

" }, "MetadataOptions":{ "shape":"LaunchTemplateInstanceMetadataOptionsRequest", - "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.

" }, "EnclaveOptions":{ "shape":"LaunchTemplateEnclaveOptionsRequest", - "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" }, "InstanceRequirements":{ "shape":"InstanceRequirementsRequest", @@ -49744,7 +50260,7 @@ }, "DisableApiStop":{ "shape":"Boolean", - "documentation":"

Indicates whether to enable the instance for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether to enable the instance for stop protection. For more information, see Enable stop protection for your instance in the Amazon EC2 User Guide.

" } }, "documentation":"

The information to include in the launch template.

You must specify at least one parameter for the launch template data.

" @@ -49792,7 +50308,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency in Amazon EC2 API requests in the Amazon EC2 User Guide.

", "locationName":"clientToken" }, "DryRun":{ @@ -50001,7 +50517,7 @@ }, "Weight":{ "shape":"DoubleWithConstraints", - "documentation":"

The number of capacity units provided by the specified instance type. This value, together with the total target capacity that you specify for the Fleet determine the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" + "documentation":"

The number of capacity units provided by the specified instance type. This value, together with the total target capacity that you specify for the Fleet, determines the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your workload. For more information, see Total target capacity in the Amazon EC2 User Guide.

" }, "AvailabilityZone":{ "shape":"String", @@ -50017,7 +50533,7 @@ }, "Priority":{ "shape":"IntegerWithConstraints", - "documentation":"

The priority to assign to the instance type. This value is used to determine which of the instance types specified for the Fleet should be prioritized for use. A lower value indicates a high priority. For more information, see Instance type priority in the Amazon EC2 User Guide.

" + "documentation":"

The priority to assign to the instance type. This value is used to determine which of the instance types specified for the Fleet should be prioritized for use. A lower value indicates a high priority. For more information, see Instance type priority in the Amazon EC2 User Guide.

" } }, "documentation":"

Information about an instance type to use in a Capacity Reservation Fleet.

" @@ -50846,7 +51362,8 @@ "vpc-block-public-access-exclusion", "ipam-resource-discovery", "ipam-resource-discovery-association", - "instance-connect-endpoint" + "instance-connect-endpoint", + "ipam-external-resource-verification-token" ] }, "ResponseError":{ @@ -50909,7 +51426,7 @@ }, "ImageId":{ "shape":"String", - "documentation":"

The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will resolve to the ID of the AMI at instance launch.

The value depends on what you specified in the request. The possible values are:

  • If an AMI ID was specified in the request, then this is the AMI ID.

  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store.

  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value.

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will resolve to the ID of the AMI at instance launch.

The value depends on what you specified in the request. The possible values are:

  • If an AMI ID was specified in the request, then this is the AMI ID.

  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store.

  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value.

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

", "locationName":"imageId" }, "InstanceType":{ @@ -50989,7 +51506,7 @@ }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptions", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide.

", "locationName":"cpuOptions" }, "CapacityReservationSpecification":{ @@ -51004,12 +51521,12 @@ }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptions", - "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

", "locationName":"hibernationOptions" }, "MetadataOptions":{ "shape":"LaunchTemplateInstanceMetadataOptions", - "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.

", "locationName":"metadataOptions" }, "EnclaveOptions":{ @@ -51034,7 +51551,7 @@ }, "DisableApiStop":{ "shape":"Boolean", - "documentation":"

Indicates whether the instance is enabled for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Indicates whether the instance is enabled for stop protection. For more information, see Enable stop protection for your instance in the Amazon EC2 User Guide.

", "locationName":"disableApiStop" } }, @@ -51474,7 +51991,7 @@ }, "DestinationPrefixListId":{ "shape":"String", - "documentation":"

The prefix of the Amazon Web Service.

", + "documentation":"

The prefix of the Amazon Web Services service.

", "locationName":"destinationPrefixListId" }, "EgressOnlyInternetGatewayId":{ @@ -51573,7 +52090,7 @@ "members":{ "Associations":{ "shape":"RouteTableAssociationList", - "documentation":"

The associations between the route table and one or more subnets or a gateway.

", + "documentation":"

The associations between the route table and your subnets or gateways.

", "locationName":"associationSet" }, "PropagatingVgws":{ @@ -51800,7 +52317,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

" + "documentation":"

The instance type. For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide.

" }, "Ipv6AddressCount":{ "shape":"Integer", @@ -51821,11 +52338,11 @@ }, "MaxCount":{ "shape":"Integer", - "documentation":"

The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ.

" + "documentation":"

The maximum number of instances to launch. If you specify a value that is more capacity than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above the specified minimum count.

Constraints: Between 1 and the quota for the specified instance type for your account for this Region. For more information, see Amazon EC2 instance type quotas.

" }, "MinCount":{ "shape":"Integer", - "documentation":"

The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

" + "documentation":"

The minimum number of instances to launch. If you specify a value that is more capacity than Amazon EC2 can provide in the target Availability Zone, Amazon EC2 does not launch any instances.

Constraints: Between 1 and the quota for the specified instance type for your account for this Region. For more information, see Amazon EC2 instance type quotas.

" }, "Monitoring":{ "shape":"RunInstancesMonitoringEnabled", @@ -51855,7 +52372,7 @@ }, "UserData":{ "shape":"RunInstancesUserData", - "documentation":"

The user data script to make available to the instance. For more information, see Run commands on your Linux instance at launch and Run commands on your Windows instance at launch. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

" + "documentation":"

The user data to make available to the instance. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.

" }, "AdditionalInfo":{ "shape":"String", @@ -51939,7 +52456,7 @@ }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.

" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -53373,7 +53890,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the parent volume.

", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that was used to protect the volume encryption key for the parent volume.

", "locationName":"kmsKeyId" }, "OwnerId":{ @@ -53403,7 +53920,7 @@ }, "StateMessage":{ "shape":"String", - "documentation":"

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper Key Management Service (KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots.

", + "documentation":"

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper KMS permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots.

", "locationName":"statusMessage" }, "VolumeId":{ @@ -53836,7 +54353,7 @@ "locationName":"terminationDelay" } }, - "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide.

" }, "SpotDatafeedSubscription":{ "type":"structure", @@ -53954,7 +54471,7 @@ }, "WeightedCapacity":{ "shape":"Double", - "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

", + "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

When specifying weights, the price used in the lowestPrice and priceCapacityOptimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.

", "locationName":"weightedCapacity" }, "TagSpecifications":{ @@ -54026,7 +54543,7 @@ "members":{ "AllocationStrategy":{ "shape":"AllocationStrategy", - "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

priceCapacityOptimized (recommended)

Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools.

capacityOptimized

Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacityOptimizedPrioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized is supported only if your Spot Fleet uses a launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

Spot Fleet requests instances from all of the Spot Instance pools that you specify.

lowestPrice

Spot Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, Spot Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowestPrice

", + "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

priceCapacityOptimized (recommended)

Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools.

capacityOptimized

Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacityOptimizedPrioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized is supported only if your Spot Fleet uses a launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

Spot Fleet requests instances from all of the Spot Instance pools that you specify.

lowestPrice (not recommended)

We don't recommend the lowestPrice allocation strategy because it has the highest risk of interruption for your Spot Instances.

Spot Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, Spot Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowestPrice

", "locationName":"allocationStrategy" }, "OnDemandAllocationStrategy":{ @@ -54091,12 +54608,12 @@ }, "OnDemandMaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", + "documentation":"

The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

", "locationName":"onDemandMaxTotalPrice" }, "SpotMaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", + "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

", "locationName":"spotMaxTotalPrice" }, "TerminateInstancesWithExpiration":{ @@ -54268,7 +54785,7 @@ }, "State":{ "shape":"SpotInstanceState", - "documentation":"

The state of the Spot Instance request. Spot request status information helps track your Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The state of the Spot Instance request. Spot request status information helps track your Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide.

", "locationName":"state" }, "Status":{ @@ -54351,7 +54868,7 @@ "members":{ "Code":{ "shape":"String", - "documentation":"

The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide.

", "locationName":"code" }, "Message":{ @@ -54379,7 +54896,7 @@ "members":{ "CapacityRebalance":{ "shape":"SpotCapacityRebalance", - "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide.

", "locationName":"capacityRebalance" } }, @@ -54416,7 +54933,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

price-capacity-optimized (recommended)

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

capacity-optimized

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

EC2 Fleet requests instances from all of the Spot Instance pools that you specify.

lowest-price

EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowest-price

", + "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

price-capacity-optimized (recommended)

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

capacity-optimized

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

EC2 Fleet requests instances from all of the Spot Instance pools that you specify.

lowest-price (not recommended)

We don't recommend the lowest-price allocation strategy because it has the highest risk of interruption for your Spot Instances.

EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowest-price

", "locationName":"allocationStrategy" }, "MaintenanceStrategies":{ @@ -54446,12 +54963,12 @@ }, "MinTargetCapacity":{ "shape":"Integer", - "documentation":"

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

", + "documentation":"

The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't reached, no instances are launched.

Constraints: Maximum value of 1000. Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

", "locationName":"minTargetCapacity" }, "MaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", + "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

", "locationName":"maxTotalPrice" } }, @@ -54462,7 +54979,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

price-capacity-optimized (recommended)

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

capacity-optimized

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

EC2 Fleet requests instances from all of the Spot Instance pools that you specify.

lowest-price

EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowest-price

" + "documentation":"

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide.

price-capacity-optimized (recommended)

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

capacity-optimized

EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

diversified

EC2 Fleet requests instances from all of the Spot Instance pools that you specify.

lowest-price (not recommended)

We don't recommend the lowest-price allocation strategy because it has the highest risk of interruption for your Spot Instances.

EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates.

Default: lowest-price

" }, "MaintenanceStrategies":{ "shape":"FleetSpotMaintenanceStrategiesRequest", @@ -54486,11 +55003,11 @@ }, "MinTargetCapacity":{ "shape":"Integer", - "documentation":"

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

" + "documentation":"

The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't reached, no instances are launched.

Constraints: Maximum value of 1000. Supported only for fleets of type instant.

At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType

" }, "MaxTotalPrice":{ "shape":"String", - "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

" + "documentation":"

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

" } }, "documentation":"

Describes the configuration of Spot Instances in an EC2 Fleet request.

" @@ -54738,7 +55255,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -54785,7 +55302,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -55280,6 +55797,16 @@ "shape":"SubnetCidrBlockState", "documentation":"

The state of the CIDR block.

", "locationName":"ipv6CidrBlockState" + }, + "Ipv6AddressAttribute":{ + "shape":"Ipv6AddressAttribute", + "documentation":"

Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.

", + "locationName":"ipv6AddressAttribute" + }, + "IpSource":{ + "shape":"IpSource", + "documentation":"

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "locationName":"ipSource" } }, "documentation":"

Describes an association between a subnet and an IPv6 CIDR block.

" @@ -55863,6 +56390,13 @@ "permanent-restore-failed" ] }, + "TokenState":{ + "type":"string", + "enum":[ + "valid", + "expired" + ] + }, "TotalLocalStorageGB":{ "type":"structure", "members":{ @@ -56007,6 +56541,11 @@ "shape":"String", "documentation":"

The description of the Traffic Mirror rule.

", "locationName":"description" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags on Traffic Mirroring filter rules.

", + "locationName":"tagSet" } }, "documentation":"

Describes the Traffic Mirror rule.

" @@ -56024,6 +56563,13 @@ "type":"list", "member":{"shape":"TrafficMirrorFilterRuleField"} }, + "TrafficMirrorFilterRuleIdList":{ + "type":"list", + "member":{ + "shape":"TrafficMirrorFilterRuleIdWithResolver", + "locationName":"item" + } + }, "TrafficMirrorFilterRuleIdWithResolver":{"type":"string"}, "TrafficMirrorFilterRuleList":{ "type":"list", @@ -56032,6 +56578,13 @@ "locationName":"item" } }, + "TrafficMirrorFilterRuleSet":{ + "type":"list", + "member":{ + "shape":"TrafficMirrorFilterRule", + "locationName":"item" + } + }, "TrafficMirrorFilterSet":{ "type":"list", "member":{ @@ -58713,6 +59266,13 @@ "locationName":"item" } }, + "VerificationMethod":{ + "type":"string", + "enum":[ + "remarks-x509", + "dns-token" + ] + }, "VerifiedAccessEndpoint":{ "type":"structure", "members":{ @@ -59540,7 +60100,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the volume.

", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that was used to protect the volume encryption key for the volume.

", "locationName":"kmsKeyId" }, "OutpostArn":{ @@ -59714,7 +60274,7 @@ }, "ModificationState":{ "shape":"VolumeModificationState", - "documentation":"

The current modification state. The modification state is null for unmodified volumes.

", + "documentation":"

The current modification state.

", "locationName":"modificationState" }, "StatusMessage":{ @@ -59788,7 +60348,7 @@ "locationName":"endTime" } }, - "documentation":"

Describes the modification status of an EBS volume.

If the volume has never been modified, some element values will be null.

" + "documentation":"

Describes the modification status of an EBS volume.

" }, "VolumeModificationList":{ "type":"list", @@ -60465,6 +61025,16 @@ "shape":"String", "documentation":"

The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

", "locationName":"ipv6Pool" + }, + "Ipv6AddressAttribute":{ + "shape":"Ipv6AddressAttribute", + "documentation":"

Public IPv6 addresses are those advertised on the internet from Amazon Web Services. Private IP addresses are not and cannot be advertised on the internet from Amazon Web Services.

", + "locationName":"ipv6AddressAttribute" + }, + "IpSource":{ + "shape":"IpSource", + "documentation":"

The source that allocated the IP address space. byoip or amazon indicates public IP address space allocated by Amazon or space that you have allocated with Bring your own IP (BYOIP). none indicates private space.

", + "locationName":"ipSource" } }, "documentation":"

Describes an IPv6 CIDR block associated with a VPC.

" diff --git a/botocore/data/ecr/2015-09-21/paginators-1.json b/botocore/data/ecr/2015-09-21/paginators-1.json index 3db2db09a8..a2161d3ce9 100644 --- a/botocore/data/ecr/2015-09-21/paginators-1.json +++ b/botocore/data/ecr/2015-09-21/paginators-1.json @@ -52,6 +52,15 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "pullThroughCacheRules" + }, + "DescribeRepositoryCreationTemplates": { + "input_token": "nextToken", + "limit_key": "maxResults", + "non_aggregate_keys": [ + "registryId" + ], + "output_token": "nextToken", + "result_key": "repositoryCreationTemplates" } } } diff --git a/botocore/data/ecr/2015-09-21/service-2.json b/botocore/data/ecr/2015-09-21/service-2.json index c4a7fe7612..f9d82b9566 100644 --- a/botocore/data/ecr/2015-09-21/service-2.json +++ b/botocore/data/ecr/2015-09-21/service-2.json @@ -12,7 +12,8 @@ "signatureVersion":"v4", "signingName":"ecr", "targetPrefix":"AmazonEC2ContainerRegistry_V20150921", - "uid":"ecr-2015-09-21" + "uid":"ecr-2015-09-21", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchCheckLayerAvailability":{ @@ -139,6 +140,23 @@ ], "documentation":"

Creates a repository. For more information, see Amazon ECR repositories in the Amazon Elastic Container Registry User Guide.

" }, + "CreateRepositoryCreationTemplate":{ + "name":"CreateRepositoryCreationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryCreationTemplateRequest"}, + "output":{"shape":"CreateRepositoryCreationTemplateResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"TemplateAlreadyExistsException"} + ], + "documentation":"

Creates a repository creation template. This template is used to define the settings for repositories created by Amazon ECR on your behalf. For example, repositories created through pull through cache actions. For more information, see Private repository creation templates in the Amazon Elastic Container Registry User Guide.

" + }, "DeleteLifecyclePolicy":{ "name":"DeleteLifecyclePolicy", "http":{ @@ -205,6 +223,22 @@ ], "documentation":"

Deletes a repository. If the repository isn't empty, you must either delete the contents of the repository or use the force option to delete the repository and have Amazon ECR delete all of its contents on your behalf.

" }, + "DeleteRepositoryCreationTemplate":{ + "name":"DeleteRepositoryCreationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryCreationTemplateRequest"}, + "output":{"shape":"DeleteRepositoryCreationTemplateResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TemplateNotFoundException"} + ], + "documentation":"

Deletes a repository creation template.

" + }, "DeleteRepositoryPolicy":{ "name":"DeleteRepositoryPolicy", "http":{ @@ -318,6 +352,36 @@ ], "documentation":"

Describes image repositories in a registry.

" }, + "DescribeRepositoryCreationTemplates":{ + "name":"DescribeRepositoryCreationTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRepositoryCreationTemplatesRequest"}, + "output":{"shape":"DescribeRepositoryCreationTemplatesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template.

" + }, + "GetAccountSetting":{ + "name":"GetAccountSetting", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountSettingRequest"}, + "output":{"shape":"GetAccountSettingResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Retrieves the basic scan type version name.

" + }, "GetAuthorizationToken":{ "name":"GetAuthorizationToken", "http":{ @@ -477,6 +541,22 @@ ], "documentation":"

List the tags for an Amazon ECR resource.

" }, + "PutAccountSetting":{ + "name":"PutAccountSetting", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAccountSettingRequest"}, + "output":{"shape":"PutAccountSettingResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Allows you to change the basic scan type version by setting the name parameter to either CLAIR or AWS_NATIVE.

" + }, "PutImage":{ "name":"PutImage", "http":{ @@ -589,7 +669,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ValidationException"} ], - "documentation":"

Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide.

When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.

" + "documentation":"

Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. For more information on the custom role for replication, see Creating an IAM role for replication.

When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.

" }, "SetRepositoryPolicy":{ "name":"SetRepositoryPolicy", @@ -696,6 +776,22 @@ ], "documentation":"

Updates an existing pull through cache rule.

" }, + "UpdateRepositoryCreationTemplate":{ + "name":"UpdateRepositoryCreationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRepositoryCreationTemplateRequest"}, + "output":{"shape":"UpdateRepositoryCreationTemplateResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TemplateNotFoundException"} + ], + "documentation":"

Updates an existing repository creation template.

" + }, "UploadLayerPart":{ "name":"UploadLayerPart", "http":{ @@ -733,6 +829,12 @@ } }, "shapes":{ + "AccountSettingName":{ + "type":"string", + "max":64, + "min":1 + }, + "AccountSettingValue":{"type":"string"}, "Arch":{"type":"string"}, "Arn":{"type":"string"}, "Attribute":{ @@ -1033,7 +1135,7 @@ }, "upstreamRegistryUrl":{ "shape":"Url", - "documentation":"

The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

  • Amazon ECR Public (ecr-public) - public.ecr.aws

  • Docker Hub (docker-hub) - registry-1.docker.io

  • Quay (quay) - quay.io

  • Kubernetes (k8s) - registry.k8s.io

  • GitHub Container Registry (github-container-registry) - ghcr.io

  • Microsoft Azure Container Registry (azure-container-registry) - <custom>.azurecr.io

  • GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com

" + "documentation":"

The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

  • Amazon ECR Public (ecr-public) - public.ecr.aws

  • Docker Hub (docker-hub) - registry-1.docker.io

  • Quay (quay) - quay.io

  • Kubernetes (k8s) - registry.k8s.io

  • GitHub Container Registry (github-container-registry) - ghcr.io

  • Microsoft Azure Container Registry (azure-container-registry) - <custom>.azurecr.io

" }, "registryId":{ "shape":"RegistryId", @@ -1078,6 +1180,64 @@ } } }, + "CreateRepositoryCreationTemplateRequest":{ + "type":"structure", + "required":[ + "prefix", + "appliedFor" + ], + "members":{ + "prefix":{ + "shape":"Prefix", + "documentation":"

The repository namespace prefix to associate with the template. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with prod/team/.

To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix.

There is always an assumed / applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR treats that as ecr-public/. When using a pull through cache rule, the repository prefix you specify during rule creation is what you should specify as your repository creation template prefix as well.

" + }, + "description":{ + "shape":"RepositoryTemplateDescription", + "documentation":"

A description for the repository creation template.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfigurationForRepositoryCreationTemplate", + "documentation":"

The encryption configuration to use for repositories created using the template.

" + }, + "resourceTags":{ + "shape":"TagList", + "documentation":"

The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "imageTagMutability":{ + "shape":"ImageTagMutability", + "documentation":"

The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

" + }, + "repositoryPolicy":{ + "shape":"RepositoryPolicyText", + "documentation":"

The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.

" + }, + "lifecyclePolicy":{ + "shape":"LifecyclePolicyTextForRepositoryCreationTemplate", + "documentation":"

The lifecycle policy to use for repositories created using the template.

" + }, + "appliedFor":{ + "shape":"RCTAppliedForList", + "documentation":"

A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.

" + }, + "customRoleArn":{ + "shape":"CustomRoleArn", + "documentation":"

The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.

" + } + } + }, + "CreateRepositoryCreationTemplateResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryCreationTemplate":{ + "shape":"RepositoryCreationTemplate", + "documentation":"

The details of the repository creation template associated with the request.

" + } + } + }, "CreateRepositoryRequest":{ "type":"structure", "required":["repositoryName"], @@ -1124,6 +1284,10 @@ "min":50, "pattern":"^arn:aws:secretsmanager:[a-zA-Z0-9-:]+:secret:ecr\\-pullthroughcache\\/[a-zA-Z0-9\\/_+=.@-]+$" }, + "CustomRoleArn":{ + "type":"string", + "max":2048 + }, "CvssScore":{ "type":"structure", "members":{ @@ -1287,6 +1451,29 @@ } } }, + "DeleteRepositoryCreationTemplateRequest":{ + "type":"structure", + "required":["prefix"], + "members":{ + "prefix":{ + "shape":"Prefix", + "documentation":"

The repository namespace prefix associated with the repository creation template.

" + } + } + }, + "DeleteRepositoryCreationTemplateResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryCreationTemplate":{ + "shape":"RepositoryCreationTemplate", + "documentation":"

The details of the repository creation template that was deleted.

" + } + } + }, "DeleteRepositoryPolicyRequest":{ "type":"structure", "required":["repositoryName"], @@ -1526,7 +1713,7 @@ "members":{ "registryId":{ "shape":"RegistryId", - "documentation":"

The ID of the registry.

" + "documentation":"

The registry ID associated with the request.

" }, "replicationConfiguration":{ "shape":"ReplicationConfiguration", @@ -1568,6 +1755,40 @@ } } }, + "DescribeRepositoryCreationTemplatesRequest":{ + "type":"structure", + "members":{ + "prefixes":{ + "shape":"PrefixList", + "documentation":"

The repository namespace prefixes associated with the repository creation templates to describe. If this value is not specified, all repository creation templates are returned.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value returned from a previous paginated DescribeRepositoryCreationTemplates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of repository results returned by DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and a nextToken value, if applicable.

" + } + } + }, + "DescribeRepositoryCreationTemplatesResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryCreationTemplates":{ + "shape":"RepositoryCreationTemplateList", + "documentation":"

The details of the repository creation templates.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value to include in a future DescribeRepositoryCreationTemplates request. When the results of a DescribeRepositoryCreationTemplates request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "EmptyUploadException":{ "type":"structure", "members":{ @@ -1585,14 +1806,29 @@ "members":{ "encryptionType":{ "shape":"EncryptionType", - "documentation":"

The encryption type to use.

If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide.

If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.

" + "documentation":"

The encryption type to use.

If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide.

If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.

" }, "kmsKey":{ "shape":"KmsKey", "documentation":"

If you use the KMS encryption type, specify the KMS key to use for encryption. The alias, key ID, or full ARN of the KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used.

" } }, - "documentation":"

The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.

By default, when no encryption configuration is set or the AES256 encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.

For more control over the encryption of the contents of your repository, you can use server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your images. For more information, see Amazon ECR encryption at rest in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.

By default, when no encryption configuration is set or the AES256 encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES256 encryption algorithm. This does not require any action on your part.

For more control over the encryption of the contents of your repository, you can use server-side encryption with Key Management Service key stored in Key Management Service (KMS) to encrypt your images. For more information, see Amazon ECR encryption at rest in the Amazon Elastic Container Registry User Guide.

" + }, + "EncryptionConfigurationForRepositoryCreationTemplate":{ + "type":"structure", + "required":["encryptionType"], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type to use.

If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide.

If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.

" + }, + "kmsKey":{ + "shape":"KmsKeyForRepositoryCreationTemplate", + "documentation":"

If you use the KMS encryption type, specify the KMS key to use for encryption. The full ARN of the KMS key must be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used.

" + } + }, + "documentation":"

The encryption configuration to associate with the repository creation template.

" }, "EncryptionType":{ "type":"string", @@ -1696,6 +1932,29 @@ "value":{"shape":"SeverityCount"} }, "ForceFlag":{"type":"boolean"}, + "GetAccountSettingRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"AccountSettingName", + "documentation":"

Basic scan type version name.

" + } + } + }, + "GetAccountSettingResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AccountSettingName", + "documentation":"

Retrieves the basic scan type version name.

" + }, + "value":{ + "shape":"AccountSettingName", + "documentation":"

Retrieves the value that specifies what basic scan type is being used: AWS_NATIVE or CLAIR.

" + } + } + }, "GetAuthorizationTokenRegistryIdList":{ "type":"list", "member":{"shape":"RegistryId"}, @@ -1864,7 +2123,7 @@ "members":{ "registryId":{ "shape":"RegistryId", - "documentation":"

The ID of the registry.

" + "documentation":"

The registry ID associated with the request.

" }, "policyText":{ "shape":"RegistryPolicyText", @@ -1882,7 +2141,7 @@ "members":{ "registryId":{ "shape":"RegistryId", - "documentation":"

The ID of the registry.

" + "documentation":"

The registry ID associated with the request.

" }, "scanningConfiguration":{ "shape":"RegistryScanningConfiguration", @@ -2360,6 +2619,12 @@ "max":2048, "min":1 }, + "KmsKeyForRepositoryCreationTemplate":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^$|arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+" + }, "Layer":{ "type":"structure", "members":{ @@ -2585,6 +2850,11 @@ "max":30720, "min":100 }, + "LifecyclePolicyTextForRepositoryCreationTemplate":{ + "type":"string", + "max":30720, + "min":0 + }, "LifecyclePreviewMaxResults":{ "type":"integer", "max":100, @@ -2736,6 +3006,16 @@ "min":0 }, "Platform":{"type":"string"}, + "Prefix":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*/?|ROOT)$" + }, + "PrefixList":{ + "type":"list", + "member":{"shape":"Prefix"} + }, "ProxyEndpoint":{"type":"string"}, "PullThroughCacheRule":{ "type":"structure", @@ -2804,6 +3084,36 @@ "min":1 }, "PushTimestamp":{"type":"timestamp"}, + "PutAccountSettingRequest":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"AccountSettingName", + "documentation":"

Basic scan type version name.

" + }, + "value":{ + "shape":"AccountSettingValue", + "documentation":"

Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR.

" + } + } + }, + "PutAccountSettingResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AccountSettingName", + "documentation":"

Retrieves the basic scan type version name.

" + }, + "value":{ + "shape":"AccountSettingValue", + "documentation":"

Retrieves the basic scan type value, either AWS_NATIVE or CLAIR.

" + } + } + }, "PutImageRequest":{ "type":"structure", "required":[ @@ -2975,7 +3285,7 @@ "members":{ "registryId":{ "shape":"RegistryId", - "documentation":"

The registry ID.

" + "documentation":"

The registry ID associated with the request.

" }, "policyText":{ "shape":"RegistryPolicyText", @@ -3024,6 +3334,17 @@ } } }, + "RCTAppliedFor":{ + "type":"string", + "enum":[ + "REPLICATION", + "PULL_THROUGH_CACHE" + ] + }, + "RCTAppliedForList":{ + "type":"list", + "member":{"shape":"RCTAppliedFor"} + }, "Reason":{"type":"string"}, "Recommendation":{ "type":"structure", @@ -3162,7 +3483,7 @@ "ReplicationDestinationList":{ "type":"list", "member":{"shape":"ReplicationDestination"}, - "max":25, + "max":100, "min":0 }, "ReplicationError":{"type":"string"}, @@ -3241,6 +3562,60 @@ "documentation":"

The specified repository already exists in the specified registry.

", "exception":true }, + "RepositoryCreationTemplate":{ + "type":"structure", + "members":{ + "prefix":{ + "shape":"Prefix", + "documentation":"

The repository namespace prefix associated with the repository creation template.

" + }, + "description":{ + "shape":"RepositoryTemplateDescription", + "documentation":"

The description associated with the repository creation template.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfigurationForRepositoryCreationTemplate", + "documentation":"

The encryption configuration associated with the repository creation template.

" + }, + "resourceTags":{ + "shape":"TagList", + "documentation":"

The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "imageTagMutability":{ + "shape":"ImageTagMutability", + "documentation":"

The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

" + }, + "repositoryPolicy":{ + "shape":"RepositoryPolicyText", + "documentation":"

The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.

" + }, + "lifecyclePolicy":{ + "shape":"LifecyclePolicyTextForRepositoryCreationTemplate", + "documentation":"

The lifecycle policy to use for repositories created using the template.

" + }, + "appliedFor":{ + "shape":"RCTAppliedForList", + "documentation":"

A list of enumerable strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.

" + }, + "customRoleArn":{ + "shape":"CustomRoleArn", + "documentation":"

The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.

" + }, + "createdAt":{ + "shape":"Date", + "documentation":"

The date and time, in JavaScript date format, when the repository creation template was created.

" + }, + "updatedAt":{ + "shape":"Date", + "documentation":"

The date and time, in JavaScript date format, when the repository creation template was last updated.

" + } + }, + "documentation":"

The details of the repository creation template associated with the request.

" + }, + "RepositoryCreationTemplateList":{ + "type":"list", + "member":{"shape":"RepositoryCreationTemplate"} + }, "RepositoryFilter":{ "type":"structure", "required":[ @@ -3381,6 +3756,10 @@ "type":"list", "member":{"shape":"RepositoryScanningConfiguration"} }, + "RepositoryTemplateDescription":{ + "type":"string", + "max":256 + }, "Resource":{ "type":"structure", "members":{ @@ -3722,6 +4101,22 @@ "key":{"shape":"TagKey"}, "value":{"shape":"TagValue"} }, + "TemplateAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The repository creation template already exists. Specify a unique prefix and try again.

", + "exception":true + }, + "TemplateNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified repository creation template can't be found. Verify the registry ID and prefix and try again.

", + "exception":true + }, "Title":{"type":"string"}, "TooManyTagsException":{ "type":"structure", @@ -3844,6 +4239,58 @@ } } }, + "UpdateRepositoryCreationTemplateRequest":{ + "type":"structure", + "required":["prefix"], + "members":{ + "prefix":{ + "shape":"Prefix", + "documentation":"

The repository namespace prefix that matches an existing repository creation template in the registry. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. This includes a repository named prod/team1 as well as a repository named prod/repository1.

To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix.

" + }, + "description":{ + "shape":"RepositoryTemplateDescription", + "documentation":"

A description for the repository creation template.

" + }, + "encryptionConfiguration":{"shape":"EncryptionConfigurationForRepositoryCreationTemplate"}, + "resourceTags":{ + "shape":"TagList", + "documentation":"

The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "imageTagMutability":{ + "shape":"ImageTagMutability", + "documentation":"

Updates the tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

" + }, + "repositoryPolicy":{ + "shape":"RepositoryPolicyText", + "documentation":"

Updates the repository policy created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.

" + }, + "lifecyclePolicy":{ + "shape":"LifecyclePolicyTextForRepositoryCreationTemplate", + "documentation":"

Updates the lifecycle policy associated with the specified repository creation template.

" + }, + "appliedFor":{ + "shape":"RCTAppliedForList", + "documentation":"

Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.

" + }, + "customRoleArn":{ + "shape":"CustomRoleArn", + "documentation":"

The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.

" + } + } + }, + "UpdateRepositoryCreationTemplateResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryCreationTemplate":{ + "shape":"RepositoryCreationTemplate", + "documentation":"

The details of the repository creation template associated with the request.

" + } + } + }, "UpdatedTimestamp":{"type":"timestamp"}, "UploadId":{ "type":"string", diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 756316e421..992f12f03c 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"ecs", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon ECS", "serviceFullName":"Amazon EC2 Container Service", "serviceId":"ECS", "signatureVersion":"v4", "targetPrefix":"AmazonEC2ContainerServiceV20141113", - "uid":"ecs-2014-11-13" + "uid":"ecs-2014-11-13", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateCapacityProvider":{ @@ -88,7 +90,7 @@ {"shape":"ServiceNotActiveException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"

Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.

" }, "DeleteAccountSetting":{ "name":"DeleteAccountSetting", @@ -651,7 +653,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify a role for your task with the taskRoleArn parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify a role for your task with the taskRoleArn parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" }, "RunTask":{ "name":"RunTask", @@ -706,7 +708,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

For Windows containers, POSIX signals do not work and runtime stops the container by sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows) container #25982 on GitHub.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "SubmitAttachmentStateChanges":{ "name":"SubmitAttachmentStateChanges", @@ -870,7 +872,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Modifies the status of an Amazon ECS container instance.

Once a container instance has reached an ACTIVE state, you can change the status of a container instance to DRAINING to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.

A container instance can't be changed to DRAINING until it has reached an ACTIVE status. If the instance is in any other status, an error will be received.

When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING state are stopped immediately.

Service tasks on the container instance that are in the RUNNING state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent and maximumPercent. You can change the deployment configuration of your service using UpdateService.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during task replacement. For example, desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.

Any PENDING or RUNNING tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.

A container instance has completed draining when it has no more RUNNING tasks. You can verify this using ListTasks.

When a container instance has been drained, you can set a container instance to ACTIVE status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the instance again.

" + "documentation":"

Modifies the status of an Amazon ECS container instance.

Once a container instance has reached an ACTIVE state, you can change the status of a container instance to DRAINING to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.

A container instance can't be changed to DRAINING until it has reached an ACTIVE status. If the instance is in any other status, an error will be received.

When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING state are stopped immediately.

Service tasks on the container instance that are in the RUNNING state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent and maximumPercent. You can change the deployment configuration of your service using UpdateService.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during task replacement. For example, desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.

Any PENDING or RUNNING tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.

A container instance has completed draining when it has no more RUNNING tasks. You can verify this using ListTasks.

When a container instance has been drained, you can set a container instance to ACTIVE status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the instance again.

" }, "UpdateService":{ "name":"UpdateService", @@ -1135,7 +1137,7 @@ "documentation":"

Whether the task's elastic network interface receives a public IP address. The default value is DISABLED.

" } }, - "documentation":"

An object representing the networking details for a task or service. For example awsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}

" + "documentation":"

An object representing the networking details for a task or service. For example awsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}.

" }, "BlockedException":{ "type":"structure", @@ -1230,7 +1232,7 @@ "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used.

" } }, - "documentation":"

The details of a capacity provider strategy. A capacity provider strategy can be set when using the RunTask or CreateCluster APIs or as the default capacity provider strategy for a cluster with the CreateCluster API.

Only capacity providers that are already associated with a cluster and have an ACTIVE or UPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "documentation":"

The details of a capacity provider strategy. A capacity provider strategy can be set when using the RunTask or CreateCluster APIs or as the default capacity provider strategy for a cluster with the CreateCluster API.

Only capacity providers that are already associated with a cluster and have an ACTIVE or UPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate that's discounted compared to the FARGATE price. FARGATE_SPOT runs tasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are interrupted with a two-minute warning. FARGATE_SPOT only supports Linux tasks with the X86_64 architecture on platform version 1.3.0 or later.

A capacity provider strategy may contain a maximum of 6 capacity providers.

" }, "CapacityProviderStrategyItemBase":{ "type":"integer", @@ -1265,7 +1267,7 @@ "documentation":"

Message that describes the cause of the exception.

" } }, - "documentation":"

These errors are usually caused by a client action. This client action might be using an action or resource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might be specifying an identifier that isn't valid.

", + "documentation":"

These errors are usually caused by a client action. This client action might be using an action or resource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might be specifying an identifier that isn't valid.

The following list includes additional causes for the error:

  • The RunTask could not be processed because you use managed scaling and there is a capacity error because the quota of tasks in the PROVISIONING per cluster has been reached. For information about the service quotas, see Amazon ECS service quotas.

", "exception":true }, "Cluster":{ @@ -1344,9 +1346,13 @@ "executeCommandConfiguration":{ "shape":"ExecuteCommandConfiguration", "documentation":"

The details of the execute command configuration.

" + }, + "managedStorageConfiguration":{ + "shape":"ManagedStorageConfiguration", + "documentation":"

The details of the managed storage configuration.

" } }, - "documentation":"

The execute command configuration for the cluster.

" + "documentation":"

The execute command and managed storage configuration for the cluster.

" }, "ClusterContainsContainerInstancesException":{ "type":"structure", @@ -1555,11 +1561,11 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of a container. If you're linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" + "documentation":"

The name of a container. If you're linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker create-container command and the --name option to docker run.

" }, "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker create-container command and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" }, "repositoryCredentials":{ "shape":"RepositoryCredentials", @@ -1567,51 +1573,55 @@ }, "cpu":{ "shape":"Integer", - "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter isn't required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one CPU.

" + "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the docker create-container command and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

  • Agent versions greater than or equal to 1.84.0: CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one CPU.

" }, "memory":{ "shape":"BoxedInteger", - "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used.

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.

" + "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the docker create-container command and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used.

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.

" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.

" + "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the docker create-container command and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.

" }, "links":{ "shape":"StringList", - "documentation":"

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" + "documentation":"

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to Links in the docker create-container command and the --link option to docker run.

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" }, "portMappings":{ "shape":"PortMappingList", - "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" + "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.

This parameter maps to PortBindings in the docker create-container command and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" }, "essential":{ "shape":"BoxedBoolean", "documentation":"

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" }, + "restartPolicy":{ + "shape":"ContainerRestartPolicy", + "documentation":"

The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the task. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + }, "entryPoint":{ "shape":"StringList", - "documentation":"

Early versions of the Amazon ECS container agent don't properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that's passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" + "documentation":"

Early versions of the Amazon ECS container agent don't properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that's passed to the container. This parameter maps to Entrypoint in the docker create-container command and the --entrypoint option to docker run.

" }, "command":{ "shape":"StringList", - "documentation":"

The command that's passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument is a separated string in the array.

" + "documentation":"

The command that's passed to the container. This parameter maps to Cmd in the docker create-container command and the COMMAND parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.

" }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the docker create-container command and the --env option to docker run.

We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.

" }, "environmentFiles":{ "shape":"EnvironmentFiles", - "documentation":"

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file contains an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file contains an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

" }, "mountPoints":{ "shape":"MountPointList", - "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers can't mount directories on a different drive, and mount point can't be across drives.

" + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the docker create-container command and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers can't mount directories on a different drive, and mount point can't be across drives.

" }, "volumesFrom":{ "shape":"VolumeFromList", - "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the docker create-container command and the --volumes-from option to docker run.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -1627,7 +1637,7 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

The valid values are 2-120 seconds.

" + "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and doesn't start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

The valid values for Fargate are 2-120 seconds.

" }, "stopTimeout":{ "shape":"BoxedInteger", @@ -1635,71 +1645,71 @@ }, "hostname":{ "shape":"String", - "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if you're using the awsvpc network mode.

" + "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the docker create-container command and the --hostname option to docker run.

The hostname parameter is not supported if you're using the awsvpc network mode.

" }, "user":{ "shape":"String", - "documentation":"

The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

When running tasks using the host network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.

You can specify the user using the following formats. If specifying a UID or GID, you must specify it as a positive integer.

  • user

  • user:group

  • uid

  • uid:gid

  • user:gid

  • uid:group

This parameter is not supported for Windows containers.

" + "documentation":"

The user to use inside the container. This parameter maps to User in the docker create-container command and the --user option to docker run.

When running tasks using the host network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.

You can specify the user using the following formats. If specifying a UID or GID, you must specify it as a positive integer.

  • user

  • user:group

  • uid

  • uid:gid

  • user:gid

  • uid:group

This parameter is not supported for Windows containers.

" }, "workingDirectory":{ "shape":"String", - "documentation":"

The working directory to run commands inside the container in. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" + "documentation":"

The working directory to run commands inside the container in. This parameter maps to WorkingDir in the docker create-container command and the --workdir option to docker run.

" }, "disableNetworking":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled in the docker create-container command.

This parameter is not supported for Windows containers.

" }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on Fargate.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the docker create-container command and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the docker create-container command and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsServers":{ "shape":"StringList", - "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the docker create-container command and the --dns option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsSearchDomains":{ "shape":"StringList", - "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the docker create-container command and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" }, "extraHosts":{ "shape":"HostEntryList", - "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter isn't supported for Windows containers or tasks that use the awsvpc network mode.

" + "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the docker create-container command and the --add-host option to docker run.

This parameter isn't supported for Windows containers or tasks that use the awsvpc network mode.

" }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see Docker Run Security Configuration. This field isn't valid for containers in tasks using the Fargate launch type.

For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.

For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

For more information about valid values, see Docker Run Security Configuration.

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"

" + "documentation":"

A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.

For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.

For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the docker create-container command and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"

" }, "interactive":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, you can deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

" + "documentation":"

When this parameter is true, you can deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the docker create-container command and the --interactive option to docker run.

" }, "pseudoTerminal":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

" + "documentation":"

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the docker create-container command and the --tty option to docker run.

" }, "dockerLabels":{ "shape":"DockerLabelsMap", - "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the docker create-container command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the docker create-container command and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 65535 and the default hard limit is 65535.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the docker create-container command and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheck":{ "shape":"HealthCheck", - "documentation":"

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "documentation":"

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the docker create-container command and the HEALTHCHECK parameter of docker run.

" }, "systemControls":{ "shape":"SystemControls", - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the docker create-container command and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -1901,6 +1911,25 @@ "type":"list", "member":{"shape":"ContainerOverride"} }, + "ContainerRestartPolicy":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"

Specifies whether a restart policy is enabled for the container.

" + }, + "ignoredExitCodes":{ + "shape":"IntegerList", + "documentation":"

A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit codes.

" + }, + "restartAttemptPeriod":{ + "shape":"BoxedInteger", + "documentation":"

A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be restarted only once every restartAttemptPeriod seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum restartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of 1800 seconds. By default, a container must run for 300 seconds before it can be restarted.

" + } + }, + "documentation":"

You can enable a restart policy for each container defined in your task definition, to overcome transient failures faster and maintain task availability. When you enable a restart policy for a container, Amazon ECS can restart the container if it exits, without needing to replace the task. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + }, "ContainerStateChange":{ "type":"structure", "members":{ @@ -2444,6 +2473,10 @@ "volumeConfigurations":{ "shape":"ServiceVolumeConfigurations", "documentation":"

The details of the volume that was configuredAtLaunch. You can configure different settings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the name from the task definition.

" + }, + "fargateEphemeralStorage":{ + "shape":"DeploymentEphemeralStorage", + "documentation":"

The Fargate ephemeral storage settings for the deployment.

" } }, "documentation":"

The details of an Amazon ECS service deployment. This is used only when a service uses the ECS deployment controller type.

" @@ -2530,6 +2563,16 @@ "EXTERNAL" ] }, + "DeploymentEphemeralStorage":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"String", + "documentation":"

Specify a Key Management Service key ID to encrypt the ephemeral storage for deployment.

" + } + }, + "documentation":"

The amount of ephemeral storage to allocate for the deployment.

" + }, "DeploymentRolloutState":{ "type":"string", "enum":[ @@ -2901,15 +2944,15 @@ }, "driver":{ "shape":"String", - "documentation":"

The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver in the Create a volume section of the Docker Remote API and the xxdriver option to docker volume create.

" + "documentation":"

The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to Driver in the docker create-volume command and the xxdriver option to docker volume create.

" }, "driverOpts":{ "shape":"StringMap", - "documentation":"

A map of Docker driver-specific options passed through. This parameter maps to DriverOpts in the Create a volume section of the Docker Remote API and the xxopt option to docker volume create.

" + "documentation":"

A map of Docker driver-specific options passed through. This parameter maps to DriverOpts in the docker create-volume command and the xxopt option to docker volume create.

" }, "labels":{ "shape":"StringMap", - "documentation":"

Custom metadata to add to your Docker volume. This parameter maps to Labels in the Create a volume section of the Docker Remote API and the xxlabel option to docker volume create.

" + "documentation":"

Custom metadata to add to your Docker volume. This parameter maps to Labels in the docker create-volume command and the xxlabel option to docker volume create.

" } }, "documentation":"

This parameter is specified when you're using Docker volumes. Docker volumes are only supported when you're using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" @@ -3285,7 +3328,7 @@ "members":{ "command":{ "shape":"StringList", - "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to run the command arguments directly, or CMD-SHELL to run the command with the container's default shell.

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

CMD-SHELL, curl -f http://localhost/ || exit 1

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

" + "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to run the command arguments directly, or CMD-SHELL to run the command with the container's default shell.

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

CMD-SHELL, curl -f http://localhost/ || exit 1

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the docker create-container command.

" }, "interval":{ "shape":"BoxedInteger", @@ -3304,7 +3347,7 @@ "documentation":"

The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod is off.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

" } }, - "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

The following describes the possible healthStatus values for a container:

  • HEALTHY-The container health check has passed successfully.

  • UNHEALTHY-The container health check has failed.

  • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

  • UNHEALTHY-One or more essential containers have failed their health check.

  • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

  • HEALTHY-All essential containers within the task have passed their health checks.

Consider the following task health example with 2 containers.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

Consider the following task health example with 3 containers.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

  • When the Amazon ECS agent cannot connect to the Amazon ECS service, the service reports the container as UNHEALTHY.

  • The health check statuses are the \"last heard from\" response from the Amazon ECS agent. There are no assumptions made about the status of the container health checks.

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

  • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

  • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

" + "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

The following describes the possible healthStatus values for a container:

  • HEALTHY-The container health check has passed successfully.

  • UNHEALTHY-The container health check has failed.

  • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

  • UNHEALTHY-One or more essential containers have failed their health check.

  • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

  • HEALTHY-All essential containers within the task have passed their health checks.

Consider the following task health example with 2 containers.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

Consider the following task health example with 3 containers.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

  • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

  • If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

  • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

  • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

" }, "HealthStatus":{ "type":"string", @@ -3427,6 +3470,10 @@ "enum":["CONTAINER_RUNTIME"] }, "Integer":{"type":"integer"}, + "IntegerList":{ + "type":"list", + "member":{"shape":"BoxedInteger"} + }, "InvalidParameterException":{ "type":"structure", "members":{ @@ -3447,14 +3494,14 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

Tasks launched on Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the docker create-container command and the --cap-add option to docker run.

Tasks launched on Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the docker create-container command and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" } }, - "documentation":"

The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more information about the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.

" + "documentation":"

The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.

" }, "KeyValuePair":{ "type":"structure", @@ -3494,7 +3541,7 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you're using tasks that use the Fargate launch type, the devices parameter isn't supported.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the docker create-container command and the --device option to docker run.

If you're using tasks that use the Fargate launch type, the devices parameter isn't supported.

" }, "initProcessEnabled":{ "shape":"BoxedBoolean", @@ -3502,11 +3549,11 @@ }, "sharedMemorySize":{ "shape":"BoxedInteger", - "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

" + "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

" }, "tmpfs":{ "shape":"TmpfsList", - "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you're using tasks that use the Fargate launch type, the tmpfs parameter isn't supported.

" + "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you're using tasks that use the Fargate launch type, the tmpfs parameter isn't supported.

" }, "maxSwap":{ "shape":"BoxedInteger", @@ -3514,7 +3561,7 @@ }, "swappiness":{ "shape":"BoxedInteger", - "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you're using tasks that use the Fargate launch type, the swappiness parameter isn't supported.

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported.

" + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you're using tasks that use the Fargate launch type, the swappiness parameter isn't supported.

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported.

" } }, "documentation":"

The Linux-specific options that are applied to the container, such as Linux KernelCapabilities.

" @@ -3914,7 +3961,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container.

For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.

" + "documentation":"

The log driver to use for the container.

For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Send Amazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Send Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner.

If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -3925,7 +3972,7 @@ "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Understand the following when specifying a log configuration for your containers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.

    For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" + "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the docker create-container command and the --log-driver option to docker run.

By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.

Understand the following when specifying a log configuration for your containers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.

    For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, syslog, splunk, and awsfirelens.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" }, "LogConfigurationOptionsMap":{ "type":"map", @@ -4062,6 +4109,20 @@ "max":100, "min":1 }, + "ManagedStorageConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"String", + "documentation":"

Specify a Key Management Service key ID to encrypt the managed storage.

" + }, + "fargateEphemeralStorageKmsKeyId":{ + "shape":"String", + "documentation":"

Specify the Key Management Service key ID for the Fargate ephemeral storage.

" + } + }, + "documentation":"

The managed storage configuration for the cluster.

" + }, "ManagedTerminationProtection":{ "type":"string", "enum":[ @@ -4326,7 +4387,7 @@ "documentation":"

The port number range on the container that's bound to the dynamically mapped host port range.

The following rules apply when you specify a containerPortRange:

  • You must use either the bridge network mode or the awsvpc network mode.

  • This parameter is available for both the EC2 and Fargate launch types.

  • This parameter is available for both the Linux and Windows operating systems.

  • The container instance must have at least version 1.67.0 of the container agent and at least version 1.67.0-1 of the ecs-init package

  • You can specify a maximum of 100 port ranges per container.

  • You do not specify a hostPortRange. The value of the hostPortRange is set as follows:

    • For containers in a task with the awsvpc network mode, the hostPortRange is set to the same value as the containerPortRange. This is a static mapping strategy.

    • For containers in a task with the bridge network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.

  • The containerPortRange valid values are between 1 and 65535.

  • A port can only be included in one port mapping per container.

  • You cannot specify overlapping port ranges.

  • The first port in the range must be less than last port in the range.

  • Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.

    For more information, see Issue #11185 on the Github website.

    For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.

You can call DescribeTasks to view the hostPortRange which are the host ports that are bound to the container ports.

" } }, - "documentation":"

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If you use containers in a task with the awsvpc or host network mode, specify the exposed ports using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Most fields of this parameter (containerPort, hostPort, protocol) maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run . If the network mode of a task definition is set to host, host ports must either be undefined or match the container port in the port mapping.

You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

" + "documentation":"

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If you use containers in a task with the awsvpc or host network mode, specify the exposed ports using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Most fields of this parameter (containerPort, hostPort, protocol) maps to PortBindings in the docker create-container command and the --publish option to docker run. If the network mode of a task definition is set to host, host ports must either be undefined or match the container port in the port mapping.

You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

" }, "PortMappingList":{ "type":"list", @@ -4572,11 +4633,11 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

" }, "containerDefinitions":{ "shape":"ContainerDefinitions", @@ -4608,11 +4669,11 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same process namespace.

If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same process namespace.

If no value is specified, the default is a private namespace for each container.

If the host PID mode is used, there's a heightened risk of undesired process namespace exposure.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "proxyConfiguration":{ "shape":"ProxyConfiguration", @@ -4628,7 +4689,7 @@ }, "runtimePlatform":{ "shape":"RuntimePlatform", - "documentation":"

The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.

When you specify a task definition in a service, this value must match the runtimePlatform value of the service.

" + "documentation":"

The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.

" } } }, @@ -4717,11 +4778,11 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The value for the specified resource type.

If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on.

If the InferenceAccelerator type is used, the value matches the deviceName for an InferenceAccelerator specified in a task definition.

" + "documentation":"

The value for the specified resource type.

When the type is GPU, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on.

When the type is InferenceAccelerator, the value matches the deviceName for an InferenceAccelerator specified in a task definition.

" }, "type":{ "shape":"ResourceType", - "documentation":"

The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator.

" + "documentation":"

The type of resource to assign to a container.

" } }, "documentation":"

The type and amount of a resource to assign to a container. The supported resource types are GPUs and Elastic Inference accelerators. For more information, see Working with GPUs on Amazon ECS or Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide

" @@ -4803,7 +4864,7 @@ }, "startedBy":{ "shape":"String", - "documentation":"

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), and underscores (_) are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

" + "documentation":"

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), forward slash (/), and underscores (_) are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

" }, "tags":{ "shape":"Tags", @@ -4829,7 +4890,7 @@ "members":{ "tasks":{ "shape":"Tasks", - "documentation":"

A full description of the tasks that were run. The tasks that were successfully placed on your cluster are described here.

" + "documentation":"

A full description of the tasks that were run. The tasks that were successfully placed on your cluster are described here.

" }, "failures":{ "shape":"Failures", @@ -5151,7 +5212,7 @@ "documentation":"

The ARN of the Amazon Web Services Private Certificate Authority certificate.

" } }, - "documentation":"

An object that represents the Amazon Web Services Private Certificate Authority certificate.

" + "documentation":"

The certificate root authority that secures your service.

" }, "ServiceConnectTlsConfiguration":{ "type":"structure", @@ -5170,7 +5231,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.

" } }, - "documentation":"

An object that represents the configuration for Service Connect TLS.

" + "documentation":"

The key that encrypts and decrypts your resources for Service Connect TLS.

" }, "ServiceEvent":{ "type":"structure", @@ -5436,7 +5497,7 @@ }, "startedBy":{ "shape":"String", - "documentation":"

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), and underscores (_) are allowed.

If a task is started by an Amazon ECS service, the startedBy parameter contains the deployment ID of the service that starts it.

" + "documentation":"

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), forward slash (/), and underscores (_) are allowed.

If a task is started by an Amazon ECS service, the startedBy parameter contains the deployment ID of the service that starts it.

" }, "tags":{ "shape":"Tags", @@ -5641,7 +5702,7 @@ "documentation":"

The namespaced kernel parameter to set a value for.

Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and Sysctls that start with \"fs.mqueue.*\"

Valid network namespace values: Sysctls that start with \"net.*\"

All of these values are supported by Fargate.

" } }, - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. Doing this has the following disadvantages:

  • For tasks that use the awsvpc network mode including Fargate, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that's started last determines which systemControls take effect.

  • For tasks that use the host network mode, the network namespace systemControls aren't supported.

If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.

  • For tasks that use the host IPC mode, IPC namespace systemControls aren't supported.

  • For tasks that use the task IPC mode, IPC namespace systemControls values apply to all containers within a task.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the docker create-container command and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. Doing this has the following disadvantages:

  • For tasks that use the awsvpc network mode including Fargate, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that's started last determines which systemControls take effect.

  • For tasks that use the host network mode, the network namespace systemControls aren't supported.

If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.

  • For tasks that use the host IPC mode, IPC namespace systemControls aren't supported.

  • For tasks that use the task IPC mode, IPC namespace systemControls values apply to all containers within a task.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" }, "SystemControls":{ "type":"list", @@ -5716,7 +5777,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and Region-specific.

", + "documentation":"

The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and Region-specific.

", "exception":true }, "TargetType":{ @@ -5869,6 +5930,10 @@ "ephemeralStorage":{ "shape":"EphemeralStorage", "documentation":"

The ephemeral storage settings for the task.

" + }, + "fargateEphemeralStorage":{ + "shape":"TaskEphemeralStorage", + "documentation":"

The Fargate ephemeral storage settings for the task.

" } }, "documentation":"

Details on a task in a cluster.

" @@ -5890,15 +5955,15 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see Windows IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission to call Amazon Web Services APIs on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

" }, "revision":{ "shape":"Integer", @@ -5934,7 +5999,7 @@ }, "cpu":{ "shape":"String", - "documentation":"

The number of cpu units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory parameter.

The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.

  • 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

  • 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

  • 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

  • 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

  • 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

  • 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    This option requires Linux platform 1.4.0 or later.

  • 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    This option requires Linux platform 1.4.0 or later.

" + "documentation":"

The number of cpu units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory parameter.

If you use the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.

  • 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

  • 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

  • 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

  • 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

  • 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

  • 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    This option requires Linux platform 1.4.0 or later.

  • 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    This option requires Linux platform 1.4.0 or later.

" }, "memory":{ "shape":"String", @@ -5946,11 +6011,11 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same process namespace.

If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same process namespace.

If no value is specified, the default is a private namespace for each container.

If the host PID mode is used, there's a heightened risk of undesired process namespace exposure.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "proxyConfiguration":{ "shape":"ProxyConfiguration", @@ -6025,6 +6090,20 @@ "DELETE_IN_PROGRESS" ] }, + "TaskEphemeralStorage":{ + "type":"structure", + "members":{ + "sizeInGiB":{ + "shape":"Integer", + "documentation":"

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum supported value is 20 GiB and the maximum supported value is 200 GiB.

" + }, + "kmsKeyId":{ + "shape":"String", + "documentation":"

Specify a Key Management Service key ID to encrypt the ephemeral storage for the task.

" + } + }, + "documentation":"

The amount of ephemeral storage to allocate for the task.

" + }, "TaskField":{ "type":"string", "enum":["TAGS"] @@ -6235,6 +6314,10 @@ "tags":{ "shape":"Tags", "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + }, + "fargateEphemeralStorage":{ + "shape":"DeploymentEphemeralStorage", + "documentation":"

The Fargate ephemeral storage settings for the task set.

" } }, "documentation":"

Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" @@ -6361,7 +6444,7 @@ "documentation":"

The hard limit for the ulimit type.

" } }, - "documentation":"

The ulimit settings to pass to the container.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

You can specify the ulimit settings for a container in a task definition.

" + "documentation":"

The ulimit settings to pass to the container.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 65535 and the default hard limit is 65535.

You can specify the ulimit settings for a container in a task definition.

" }, "UlimitList":{ "type":"list", diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index 0d837216ad..3224d1ed1a 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2015-02-01", "endpointPrefix":"elasticfilesystem", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"EFS", "serviceFullName":"Amazon Elastic File System", "serviceId":"EFS", "signatureVersion":"v4", - "uid":"elasticfilesystem-2015-02-01" + "uid":"elasticfilesystem-2015-02-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateAccessPoint":{ diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index 49df241876..840ed07ec3 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"eks", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Amazon EKS", "serviceFullName":"Amazon Elastic Kubernetes Service", "serviceId":"EKS", "signatureVersion":"v4", "signingName":"eks", - "uid":"eks-2017-11-01" + "uid":"eks-2017-11-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAccessPolicy":{ @@ -118,7 +120,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnsupportedAvailabilityZoneException"} ], - "documentation":"

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

" + "documentation":"

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide.

" }, "CreateEksAnywhereSubscription":{ "name":"CreateEksAnywhereSubscription", @@ -172,7 +174,7 @@ {"shape":"ServerException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a managed node group for an Amazon EKS cluster.

You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by Amazon Web Services for an Amazon EKS cluster. For more information, see Managed node groups in the Amazon EKS User Guide.

Windows AMI types are only supported for commercial Amazon Web Services Regions that support Windows on Amazon EKS.

" + "documentation":"

Creates a managed node group for an Amazon EKS cluster.

You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Customizing managed nodes with launch templates.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by Amazon Web Services for an Amazon EKS cluster. For more information, see Managed node groups in the Amazon EKS User Guide.

Windows AMI types are only supported for commercial Amazon Web Services Regions that support Windows on Amazon EKS.

" }, "CreatePodIdentityAssociation":{ "name":"CreatePodIdentityAssociation", @@ -960,7 +962,9 @@ "WINDOWS_CORE_2022_x86_64", "WINDOWS_FULL_2022_x86_64", "AL2023_x86_64_STANDARD", - "AL2023_ARM_64_STANDARD" + "AL2023_ARM_64_STANDARD", + "AL2023_x86_64_NEURON", + "AL2023_x86_64_NVIDIA" ] }, "AccessConfigResponse":{ @@ -1133,6 +1137,10 @@ "configurationValues":{ "shape":"String", "documentation":"

The configuration values that you provided.

" + }, + "podIdentityAssociations":{ + "shape":"StringList", + "documentation":"

An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" } }, "documentation":"

An Amazon EKS add-on. For more information, see Amazon EKS add-ons in the Amazon EKS User Guide.

" @@ -1205,13 +1213,55 @@ "ConfigurationConflict", "AdmissionRequestDenied", "UnsupportedAddonModification", - "K8sResourceNotFound" + "K8sResourceNotFound", + "AddonSubscriptionNeeded", + "AddonPermissionFailure" ] }, "AddonIssueList":{ "type":"list", "member":{"shape":"AddonIssue"} }, + "AddonPodIdentityAssociations":{ + "type":"structure", + "required":[ + "serviceAccount", + "roleArn" + ], + "members":{ + "serviceAccount":{ + "shape":"String", + "documentation":"

The name of a Kubernetes Service Account.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The ARN of an IAM Role.

" + } + }, + "documentation":"

A type of Pod Identity Association owned by an Amazon EKS Add-on.

Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + }, + "AddonPodIdentityAssociationsList":{ + "type":"list", + "member":{"shape":"AddonPodIdentityAssociations"} + }, + "AddonPodIdentityConfiguration":{ + "type":"structure", + "members":{ + "serviceAccount":{ + "shape":"String", + "documentation":"

The Kubernetes Service Account name used by the addon.

" + }, + "recommendedManagedPolicies":{ + "shape":"StringList", + "documentation":"

A suggested IAM Policy for the addon.

" + } + }, + "documentation":"

Information about how to configure IAM for an Addon.

" + }, + "AddonPodIdentityConfigurationList":{ + "type":"list", + "member":{"shape":"AddonPodIdentityConfiguration"} + }, "AddonStatus":{ "type":"string", "enum":[ @@ -1243,6 +1293,10 @@ "requiresConfiguration":{ "shape":"Boolean", "documentation":"

Whether the add-on requires configuration.

" + }, + "requiresIamPermissions":{ + "shape":"Boolean", + "documentation":"

Indicates if the Addon requires IAM Permissions to operate, such as networking permissions.

" } }, "documentation":"

Information about an add-on version.

" @@ -1449,7 +1503,8 @@ "type":"string", "enum":[ "ON_DEMAND", - "SPOT" + "SPOT", + "CAPACITY_BLOCK" ] }, "Category":{ @@ -1597,7 +1652,7 @@ }, "health":{ "shape":"ClusterHealth", - "documentation":"

An object representing the health of your local Amazon EKS cluster on an Amazon Web Services Outpost. This object isn't available for clusters on the Amazon Web Services cloud.

" + "documentation":"

An object representing the health of your Amazon EKS cluster.

" }, "outpostConfig":{ "shape":"OutpostConfigResponse", @@ -1606,6 +1661,10 @@ "accessConfig":{ "shape":"AccessConfigResponse", "documentation":"

The access configuration for the cluster.

" + }, + "upgradePolicy":{ + "shape":"UpgradePolicyResponse", + "documentation":"

This value indicates if extended support is enabled or disabled for the cluster.

Learn more about EKS Extended Support in the EKS User Guide.

" } }, "documentation":"

An object representing an Amazon EKS cluster.

" @@ -1615,10 +1674,10 @@ "members":{ "issues":{ "shape":"ClusterIssueList", - "documentation":"

An object representing the health issues of your local Amazon EKS cluster on an Amazon Web Services Outpost.

" + "documentation":"

An object representing the health issues of your Amazon EKS cluster.

" } }, - "documentation":"

An object representing the health of your local Amazon EKS cluster on an Amazon Web Services Outpost. You can't use this API with an Amazon EKS cluster on the Amazon Web Services cloud.

" + "documentation":"

An object representing the health of your Amazon EKS cluster.

" }, "ClusterIssue":{ "type":"structure", @@ -1636,7 +1695,7 @@ "documentation":"

The resource IDs that the issue relates to.

" } }, - "documentation":"

An issue with your local Amazon EKS cluster on an Amazon Web Services Outpost. You can't use this API with an Amazon EKS cluster on the Amazon Web Services cloud.

" + "documentation":"

An issue with your Amazon EKS cluster.

" }, "ClusterIssueCode":{ "type":"string", @@ -1884,6 +1943,10 @@ "configurationValues":{ "shape":"String", "documentation":"

The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration.

" + }, + "podIdentityAssociations":{ + "shape":"AddonPodIdentityAssociationsList", + "documentation":"

An array of Pod Identity Associations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" } } }, @@ -1945,6 +2008,14 @@ "accessConfig":{ "shape":"CreateAccessConfigRequest", "documentation":"

The access configuration for the cluster.

" + }, + "bootstrapSelfManagedAddons":{ + "shape":"BoxedBoolean", + "documentation":"

If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

The default networking addons include vpc-cni, coredns, and kube-proxy.

Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

" + }, + "upgradePolicy":{ + "shape":"UpgradePolicyRequest", + "documentation":"

New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD.

" } } }, @@ -2079,27 +2150,27 @@ }, "diskSize":{ "shape":"BoxedInteger", - "documentation":"

The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify launchTemplate, then don't specify diskSize, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify launchTemplate, then don't specify diskSize, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "subnets":{ "shape":"StringList", - "documentation":"

The subnets to use for the Auto Scaling group that is created for your node group. If you specify launchTemplate, then don't specify SubnetId in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The subnets to use for the Auto Scaling group that is created for your node group. If you specify launchTemplate, then don't specify SubnetId in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "instanceTypes":{ "shape":"StringList", - "documentation":"

Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Launch template support in the Amazon EKS User Guide.

" + "documentation":"

Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "amiType":{ "shape":"AMITypes", - "documentation":"

The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add eks:kube-proxy-windows to your Windows nodes rolearn in the aws-auth ConfigMap. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add eks:kube-proxy-windows to your Windows nodes rolearn in the aws-auth ConfigMap. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "remoteAccess":{ "shape":"RemoteAccessConfig", - "documentation":"

The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "nodeRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "labels":{ "shape":"labelsMap", @@ -2120,7 +2191,7 @@ }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

An object representing a node group's launch template specification. If specified, then do not specify instanceTypes, diskSize, or remoteAccess and make sure that the launch template meets the requirements in launchTemplateSpecification.

" + "documentation":"

An object representing a node group's launch template specification. When using this object, don't directly specify instanceTypes, diskSize, or remoteAccess. Make sure that the launch template meets the requirements in launchTemplateSpecification. Also refer to Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "updateConfig":{ "shape":"NodegroupUpdateConfig", @@ -2132,11 +2203,11 @@ }, "version":{ "shape":"String", - "documentation":"

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "releaseVersion":{ "shape":"String", - "documentation":"

The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide.

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide.

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } } }, @@ -2500,6 +2571,10 @@ "configurationSchema":{ "shape":"String", "documentation":"

A JSON schema that's used to validate the configuration values you provide when an add-on is created or updated.

" + }, + "podIdentityConfiguration":{ + "shape":"AddonPodIdentityConfigurationList", + "documentation":"

The Kubernetes service account name used by the addon, and any suggested IAM policies. Use this information to create an IAM Role for the Addon.

" } } }, @@ -3094,10 +3169,55 @@ "tags":{ "shape":"TagMap", "documentation":"

Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources.

" + }, + "health":{ + "shape":"FargateProfileHealth", + "documentation":"

The health status of the Fargate profile. If there are issues with your Fargate profile's health, they are listed here.

" } }, "documentation":"

An object representing a Fargate profile.

" }, + "FargateProfileHealth":{ + "type":"structure", + "members":{ + "issues":{ + "shape":"FargateProfileIssueList", + "documentation":"

Any issues that are associated with the Fargate profile.

" + } + }, + "documentation":"

The health status of the Fargate profile. If there are issues with your Fargate profile's health, they are listed here.

" + }, + "FargateProfileIssue":{ + "type":"structure", + "members":{ + "code":{ + "shape":"FargateProfileIssueCode", + "documentation":"

A brief description of the error.

" + }, + "message":{ + "shape":"String", + "documentation":"

The error message associated with the issue.

" + }, + "resourceIds":{ + "shape":"StringList", + "documentation":"

The Amazon Web Services resources that are affected by this issue.

" + } + }, + "documentation":"

An issue that is associated with the Fargate profile.

" + }, + "FargateProfileIssueCode":{ + "type":"string", + "enum":[ + "PodExecutionRoleAlreadyInUse", + "AccessDenied", + "ClusterUnreachable", + "InternalFailure" + ] + }, + "FargateProfileIssueList":{ + "type":"list", + "member":{"shape":"FargateProfileIssue"} + }, "FargateProfileLabel":{ "type":"map", "key":{"shape":"String"}, @@ -3494,7 +3614,7 @@ "documentation":"

The ID of the launch template.

You must specify either the launch template ID or the launch template name in the request, but not both.

" } }, - "documentation":"

An object representing a node group launch template specification. The launch template can't include SubnetId , IamInstanceProfile , RequestSpotInstances , HibernationOptions , or TerminateInstances , or the node group deployment or update will fail. For more information about launch templates, see CreateLaunchTemplate in the Amazon EC2 API Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

You must specify either the launch template ID or the launch template name in the request, but not both.

" + "documentation":"

An object representing a node group launch template specification. The launch template can't include SubnetId , IamInstanceProfile , RequestSpotInstances , HibernationOptions , or TerminateInstances , or the node group deployment or update will fail. For more information about launch templates, see CreateLaunchTemplate in the Amazon EC2 API Reference. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

You must specify either the launch template ID or the launch template name in the request, but not both.

" }, "ListAccessEntriesRequest":{ "type":"structure", @@ -4539,6 +4659,10 @@ "modifiedAt":{ "shape":"Timestamp", "documentation":"

The most recent timestamp that the association was modified at

" + }, + "ownerArn":{ + "shape":"String", + "documentation":"

If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

" } }, "documentation":"

Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

" @@ -4569,6 +4693,10 @@ "associationId":{ "shape":"String", "documentation":"

The ID of the association.

" + }, + "ownerArn":{ + "shape":"String", + "documentation":"

If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

" } }, "documentation":"

The summarized description of the association.

Each summary is simplified by removing these fields compared to the full PodIdentityAssociation :

  • The IAM role: roleArn

  • The timestamp that the association was created at: createdAt

  • The most recent timestamp that the association was modified at: modifiedAt

  • The tags on the association: tags

" @@ -4781,6 +4909,13 @@ "type":"list", "member":{"shape":"String"} }, + "SupportType":{ + "type":"string", + "enum":[ + "STANDARD", + "EXTENDED" + ] + }, "TagKey":{ "type":"string", "documentation":"

One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.

", @@ -5029,6 +5164,10 @@ "configurationValues":{ "shape":"String", "documentation":"

The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration.

" + }, + "podIdentityAssociations":{ + "shape":"AddonPodIdentityAssociationsList", + "documentation":"

An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted.

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" } } }, @@ -5061,6 +5200,10 @@ "accessConfig":{ "shape":"UpdateAccessConfigRequest", "documentation":"

The access configuration for the cluster.

" + }, + "upgradePolicy":{ + "shape":"UpgradePolicyRequest", + "documentation":"

You can enable or disable extended support for clusters currently on standard support. You cannot disable extended support once it starts. You must enable extended support before your cluster exits standard support.

" } } }, @@ -5219,11 +5362,11 @@ }, "version":{ "shape":"String", - "documentation":"

The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "releaseVersion":{ "shape":"String", - "documentation":"

The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide.

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide.

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", @@ -5289,7 +5432,9 @@ "ConfigurationValues", "SecurityGroups", "Subnets", - "AuthenticationMode" + "AuthenticationMode", + "PodIdentityAssociations", + "UpgradePolicy" ] }, "UpdateParams":{ @@ -5370,9 +5515,30 @@ "AssociateEncryptionConfig", "AddonUpdate", "VpcConfigUpdate", - "AccessConfigUpdate" + "AccessConfigUpdate", + "UpgradePolicyUpdate" ] }, + "UpgradePolicyRequest":{ + "type":"structure", + "members":{ + "supportType":{ + "shape":"SupportType", + "documentation":"

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

Learn more about EKS Extended Support in the EKS User Guide.

" + } + }, + "documentation":"

The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support.

Learn more about EKS Extended Support in the EKS User Guide.

" + }, + "UpgradePolicyResponse":{ + "type":"structure", + "members":{ + "supportType":{ + "shape":"SupportType", + "documentation":"

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

Learn more about EKS Extended Support in the EKS User Guide.

" + } + }, + "documentation":"

This value indicates if extended support is enabled or disabled for the cluster.

Learn more about EKS Extended Support in the EKS User Guide.

" + }, "VpcConfigRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 2b9c916def..2ccfdc1d30 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2015-02-02", "endpointPrefix":"elasticache", "protocol":"query", + "protocols":["query"], "serviceFullName":"Amazon ElastiCache", "serviceId":"ElastiCache", "signatureVersion":"v4", "uid":"elasticache-2015-02-02", - "xmlNamespace":"http://elasticache.amazonaws.com/doc/2015-02-02/" + "xmlNamespace":"http://elasticache.amazonaws.com/doc/2015-02-02/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToResource":{ @@ -135,7 +137,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a copy of an existing serverless cache’s snapshot. Available for Redis only.

" + "documentation":"

Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -157,7 +159,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

" + "documentation":"

Makes a copy of an existing snapshot.

This operation is valid for Redis OSS only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

" }, "CreateCacheCluster":{ "name":"CreateCacheCluster", @@ -186,7 +188,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis.

This operation is not supported for Redis (cluster mode enabled) clusters.

" + "documentation":"

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS.

This operation is not supported for Redis OSS (cluster mode enabled) clusters.

" }, "CreateCacheParameterGroup":{ "name":"CreateCacheParameterGroup", @@ -268,7 +270,7 @@ {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

  • The GlobalReplicationGroupIdSuffix is the name of the Global datastore.

  • The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.

" + "documentation":"

Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

  • The GlobalReplicationGroupIdSuffix is the name of the Global datastore.

  • The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.

" }, "CreateReplicationGroup":{ "name":"CreateReplicationGroup", @@ -302,7 +304,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.

A Redis (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed.

The node or shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster.

To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type.

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide.

This operation is valid for Redis only.

" + "documentation":"

Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.

A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number of replicas allowed.

The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster.

To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type.

When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide.

This operation is valid for Redis OSS only.

" }, "CreateServerlessCache":{ "name":"CreateServerlessCache", @@ -351,7 +353,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis only.

" + "documentation":"

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -376,7 +378,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Creates a copy of an entire cluster or replication group at a specific moment in time.

This operation is valid for Redis only.

" + "documentation":"

Creates a copy of an entire cluster or replication group at a specific moment in time.

This operation is valid for Redis OSS only.

" }, "CreateUser":{ "name":"CreateUser", @@ -398,7 +400,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"TagQuotaPerResourceExceeded"} ], - "documentation":"

For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see Using Role Based Access Control (RBAC).

" }, "CreateUserGroup":{ "name":"CreateUserGroup", @@ -421,7 +423,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TagQuotaPerResourceExceeded"} ], - "documentation":"

For Redis engine version 6.0 onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)

" + "documentation":"

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more information, see Using Role Based Access Control (RBAC)

" }, "DecreaseNodeGroupsInGlobalReplicationGroup":{ "name":"DecreaseNodeGroupsInGlobalReplicationGroup", @@ -467,7 +469,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" + "documentation":"

Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" }, "DeleteCacheCluster":{ "name":"DeleteCacheCluster", @@ -489,7 +491,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation is not valid for:

  • Redis (cluster mode enabled) clusters

  • Redis (cluster mode disabled) clusters

  • A cluster that is the last read replica of a replication group

  • A cluster that is the primary node of a replication group

  • A node group (shard) that has Multi-AZ mode enabled

  • A cluster from a Redis (cluster mode enabled) replication group

  • A cluster that is not in the available state

" + "documentation":"

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation is not valid for:

  • Redis OSS (cluster mode enabled) clusters

  • Redis OSS (cluster mode disabled) clusters

  • A cluster that is the last read replica of a replication group

  • A cluster that is the primary node of a replication group

  • A node group (shard) that has Multi-AZ mode enabled

  • A cluster from a Redis OSS (cluster mode enabled) replication group

  • A cluster that is not in the available state

" }, "DeleteCacheParameterGroup":{ "name":"DeleteCacheParameterGroup", @@ -572,7 +574,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true.

When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.

This operation is valid for Redis only.

" + "documentation":"

Deletes an existing replication group. By default, this operation deletes the entire replication group, including the primary/primaries and all of the read replicas. If the replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true.

When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.

  • CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception.

  • This operation is valid for Redis OSS only.

" }, "DeleteServerlessCache":{ "name":"DeleteServerlessCache", @@ -594,7 +596,7 @@ {"shape":"InvalidCredentialsException"}, {"shape":"ServiceLinkedRoleNotFoundFault"} ], - "documentation":"

Deletes a specified existing serverless cache.

" + "documentation":"

Deletes a specified existing serverless cache.

CreateServerlessCacheSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception.

" }, "DeleteServerlessCacheSnapshot":{ "name":"DeleteServerlessCacheSnapshot", @@ -613,7 +615,7 @@ {"shape":"InvalidServerlessCacheSnapshotStateFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Deletes an existing serverless cache snapshot. Available for Redis only.

" + "documentation":"

Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "DeleteSnapshot":{ "name":"DeleteSnapshot", @@ -632,7 +634,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation.

This operation is valid for Redis only.

" + "documentation":"

Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation.

This operation is valid for Redis OSS only.

" }, "DeleteUser":{ "name":"DeleteUser", @@ -652,7 +654,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"DefaultUserAssociatedToUserGroupFault"} ], - "documentation":"

For Redis engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).

" }, "DeleteUserGroup":{ "name":"DeleteUserGroup", @@ -671,7 +673,7 @@ {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

For Redis engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).

" }, "DescribeCacheClusters":{ "name":"DescribeCacheClusters", @@ -842,7 +844,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups.

This operation is valid for Redis only.

" + "documentation":"

Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups.

This operation is valid for Redis OSS only.

" }, "DescribeReservedCacheNodes":{ "name":"DescribeReservedCacheNodes", @@ -897,7 +899,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis only.

" + "documentation":"

Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only.

" }, "DescribeServerlessCaches":{ "name":"DescribeServerlessCaches", @@ -952,7 +954,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster.

This operation is valid for Redis only.

" + "documentation":"

Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster.

This operation is valid for Redis OSS only.

" }, "DescribeUpdateActions":{ "name":"DescribeUpdateActions", @@ -1043,7 +1045,7 @@ {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis only.

" + "documentation":"

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only.

" }, "FailoverGlobalReplicationGroup":{ "name":"FailoverGlobalReplicationGroup", @@ -1107,7 +1109,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" + "documentation":"

Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" }, "ListAllowedNodeTypeModifications":{ "name":"ListAllowedNodeTypeModifications", @@ -1126,7 +1128,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Lists all available node types that you can scale your Redis cluster's or replication group's current node type.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

" + "documentation":"

Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1271,7 +1273,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Modifies the settings for a replication group. This is limited to Redis 7 and newer.

This operation is valid for Redis only.

" + "documentation":"

Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer.

This operation is valid for Redis OSS only.

" }, "ModifyReplicationGroupShardConfiguration":{ "name":"ModifyReplicationGroupShardConfiguration", @@ -1383,7 +1385,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis or Managing Costs with Reserved Nodes for Memcached.

" + "documentation":"

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached.

" }, "RebalanceSlotsInGlobalReplicationGroup":{ "name":"RebalanceSlotsInGlobalReplicationGroup", @@ -1418,7 +1420,7 @@ {"shape":"InvalidCacheClusterStateFault"}, {"shape":"CacheClusterNotFoundFault"} ], - "documentation":"

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.

The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

When the reboot is complete, a cluster event is created.

Rebooting a cluster is currently supported on Memcached and Redis (cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled) clusters.

If you make changes to parameters that require a Redis (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

" + "documentation":"

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.

The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

When the reboot is complete, a cluster event is created.

Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled) clusters.

If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -1532,7 +1534,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" + "documentation":"

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.

Note the following

  • A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" }, "TestMigration":{ "name":"TestMigration", @@ -1607,11 +1609,11 @@ "members":{ "ScaleUpModifications":{ "shape":"NodeTypeList", - "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.

When scaling up a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" + "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.

When scaling up a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" }, "ScaleDownModifications":{ "shape":"NodeTypeList", - "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" + "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" } }, "documentation":"

Represents the allowed node types you can use to modify your cluster or replication group.

" @@ -1803,7 +1805,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -1819,7 +1821,7 @@ }, "NumCacheNodes":{ "shape":"IntegerOptional", - "documentation":"

The number of cache nodes in the cluster.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" + "documentation":"

The number of cache nodes in the cluster.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" }, "PreferredAvailabilityZone":{ "shape":"String", @@ -1860,7 +1862,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SecurityGroups":{ "shape":"SecurityGroupMembershipList", @@ -1880,7 +1882,7 @@ }, "AuthTokenEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

" + "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis OSS commands.

Default: false

" }, "AuthTokenLastModifiedDate":{ "shape":"TStamp", @@ -1888,11 +1890,11 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

" }, "ARN":{ "shape":"String", @@ -1908,11 +1910,11 @@ }, "NetworkType":{ "shape":"NetworkType", - "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "TransitEncryptionMode":{ "shape":"TransitEncryptionMode", @@ -2055,7 +2057,7 @@ "documentation":"

The customer outpost ARN of the cache node.

" } }, - "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis OSS.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "CacheNodeIdsList":{ "type":"list", @@ -2111,7 +2113,7 @@ "documentation":"

Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

" } }, - "documentation":"

A parameter that has a different value for each cache node type it is applied to. For example, in a Redis cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

" + "documentation":"

A parameter that has a different value for each cache node type it is applied to. For example, in a Redis OSS cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

" }, "CacheNodeTypeSpecificParametersList":{ "type":"list", @@ -2453,7 +2455,7 @@ }, "SupportedNetworkTypes":{ "shape":"NetworkTypeList", - "documentation":"

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "documentation":"

Represents the output of one of the following operations:

  • CreateCacheSubnetGroup

  • ModifyCacheSubnetGroup

", @@ -2624,15 +2626,15 @@ "members":{ "NodeGroupId":{ "shape":"AllowedNodeGroupId", - "documentation":"

The 4-digit id for the node group you are configuring. For Redis (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.

" + "documentation":"

The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.

" }, "NewReplicaCount":{ "shape":"Integer", - "documentation":"

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis replication group you are working with.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ: 1

    • If Multi-AZ: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + "documentation":"

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis OSS replication group you are working with.

The minimum number of replicas in a shard or replication group is:

  • Redis OSS (cluster mode disabled)

    • If Multi-AZ: 1

    • If Multi-AZ: 0

  • Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" }, "PreferredAvailabilityZones":{ "shape":"PreferredAvailabilityZoneList", - "documentation":"

A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache for Redis selects the availability zone for each of the replicas.

" + "documentation":"

A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache (Redis OSS) selects the availability zone for each of the replicas.

" }, "PreferredOutpostArns":{ "shape":"PreferredOutpostArnList", @@ -2650,19 +2652,19 @@ "members":{ "SourceServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis only.

" + "documentation":"

The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only.

" }, "TargetServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The identifier for the snapshot to be created. Available for Redis only.

" + "documentation":"

The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The identifier of the KMS key used to encrypt the target snapshot. Available for Redis only.

" + "documentation":"

The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis only. Default: NULL

" + "documentation":"

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL

" } } }, @@ -2671,7 +2673,7 @@ "members":{ "ServerlessCacheSnapshot":{ "shape":"ServerlessCacheSnapshot", - "documentation":"

The response for the attempt to copy the serverless cache snapshot. Available for Redis only.

" + "documentation":"

The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -2737,11 +2739,11 @@ }, "NumCacheNodes":{ "shape":"IntegerOptional", - "documentation":"

The initial number of cache nodes that the cluster has.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

" + "documentation":"

The initial number of cache nodes that the cluster has.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

" }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -2773,11 +2775,11 @@ }, "SnapshotArns":{ "shape":"SnapshotArnsList", - "documentation":"

A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.

This parameter is only valid if the Engine parameter is redis.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

" + "documentation":"

A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.

This parameter is only valid if the Engine parameter is redis.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

" }, "SnapshotName":{ "shape":"String", - "documentation":"

The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.

This parameter is only valid if the Engine parameter is redis.

" + "documentation":"

The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.

This parameter is only valid if the Engine parameter is redis.

" }, "PreferredMaintenanceWindow":{ "shape":"String", @@ -2793,7 +2795,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -2829,11 +2831,11 @@ }, "NetworkType":{ "shape":"NetworkType", - "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "documentation":"

Represents the input of a CreateCacheCluster operation.

" @@ -2990,7 +2992,7 @@ }, "AutomaticFailoverEnabled":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

" + "documentation":"

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups.

Default: false

" }, "MultiAZEnabled":{ "shape":"BooleanOptional", @@ -3006,7 +3008,7 @@ }, "NumNodeGroups":{ "shape":"IntegerOptional", - "documentation":"

An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.

Default: 1

" + "documentation":"

An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.

Default: 1

" }, "ReplicasPerNodeGroup":{ "shape":"IntegerOptional", @@ -3014,11 +3016,11 @@ }, "NodeGroupConfiguration":{ "shape":"NodeGroupConfigurationList", - "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" + "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from an S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -3030,7 +3032,7 @@ }, "CacheParameterGroupName":{ "shape":"String", - "documentation":"

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

  • To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2.

  • To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.

" + "documentation":"

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

  • To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2.

  • To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.

" }, "CacheSubnetGroupName":{ "shape":"String", @@ -3050,7 +3052,7 @@ }, "SnapshotArns":{ "shape":"SnapshotArnsList", - "documentation":"

A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

" + "documentation":"

A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

" }, "SnapshotName":{ "shape":"String", @@ -3070,7 +3072,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -3086,11 +3088,11 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

" + "documentation":"

A flag that enables in-transit encryption when set to true.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

" }, "KmsKeyId":{ "shape":"String", @@ -3110,23 +3112,23 @@ }, "NetworkType":{ "shape":"NetworkType", - "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "TransitEncryptionMode":{ "shape":"TransitEncryptionMode", - "documentation":"

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

This process will not trigger the replacement of the replication group.

" + "documentation":"

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

This process will not trigger the replacement of the replication group.

" }, "ClusterMode":{ "shape":"ClusterMode", - "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" + "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" }, "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The name of the snapshot used to create a replication group. Available for Redis only.

" + "documentation":"

The name of the snapshot used to create a replication group. Available for Redis OSS only.

" } }, "documentation":"

Represents the input of a CreateReplicationGroup operation.

" @@ -3174,7 +3176,7 @@ }, "SnapshotArnsToRestore":{ "shape":"SnapshotArnsList", - "documentation":"

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only.

" + "documentation":"

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only.

" }, "Tags":{ "shape":"TagList", @@ -3182,7 +3184,7 @@ }, "UserGroupId":{ "shape":"String", - "documentation":"

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL.

" + "documentation":"

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" }, "SubnetIds":{ "shape":"SubnetIdsList", @@ -3190,11 +3192,11 @@ }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", - "documentation":"

The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only.

" + "documentation":"

The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only.

" }, "DailySnapshotTime":{ "shape":"String", - "documentation":"

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis only.

" + "documentation":"

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3216,19 +3218,19 @@ "members":{ "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The name for the snapshot being created. Must be unique for the customer account. Available for Redis only. Must be between 1 and 255 characters.

" + "documentation":"

The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters.

" }, "ServerlessCacheName":{ "shape":"String", - "documentation":"

The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis only.

" + "documentation":"

The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The ID of the KMS key used to encrypt the snapshot. Available for Redis only. Default: NULL

" + "documentation":"

The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL

" }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis only.

" + "documentation":"

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3237,7 +3239,7 @@ "members":{ "ServerlessCacheSnapshot":{ "shape":"ServerlessCacheSnapshot", - "documentation":"

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis only.

" + "documentation":"

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3287,7 +3289,7 @@ }, "Engine":{ "shape":"EngineType", - "documentation":"

The current supported value is Redis.

" + "documentation":"

The current supported value is Redis OSS user.

" }, "UserIds":{ "shape":"UserIdListInput", @@ -3295,7 +3297,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis only.

" + "documentation":"

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis OSS only.

" } } }, @@ -3408,11 +3410,11 @@ }, "GlobalNodeGroupsToRemove":{ "shape":"GlobalNodeGroupIdList", - "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster.

" + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster.

" }, "GlobalNodeGroupsToRetain":{ "shape":"GlobalNodeGroupIdList", - "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache for Redis will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster.

" + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster.

" }, "ApplyImmediately":{ "shape":"Boolean", @@ -3439,11 +3441,11 @@ }, "NewReplicaCount":{ "shape":"IntegerOptional", - "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ is enabled: 1

    • If Multi-AZ is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis OSS (cluster mode disabled)

    • If Multi-AZ is enabled: 1

    • If Multi-AZ is not enabled: 0

  • Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" }, "ReplicaConfiguration":{ "shape":"ReplicaConfigurationList", - "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" + "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" }, "ReplicasToRemove":{ "shape":"RemoveReplicasList", @@ -3597,7 +3599,7 @@ }, "FinalSnapshotName":{ "shape":"String", - "documentation":"

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken.

" + "documentation":"

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken.

" } } }, @@ -3616,7 +3618,7 @@ "members":{ "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

Idenfitier of the snapshot to be deleted. Available for Redis only.

" + "documentation":"

Identifier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3625,7 +3627,7 @@ "members":{ "ServerlessCacheSnapshot":{ "shape":"ServerlessCacheSnapshot", - "documentation":"

The snapshot to be deleted. Available for Redis only.

" + "documentation":"

The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -3687,7 +3689,7 @@ }, "ShowCacheClustersNotInReplicationGroups":{ "shape":"BooleanOptional", - "documentation":"

An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis clusters.

" + "documentation":"

An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Redis OSS clusters.

" } }, "documentation":"

Represents the input of a DescribeCacheClusters operation.

" @@ -3923,7 +3925,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Duration":{ "shape":"String", @@ -3957,7 +3959,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Duration":{ "shape":"String", @@ -3987,23 +3989,23 @@ "members":{ "ServerlessCacheName":{ "shape":"String", - "documentation":"

The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis only.

" + "documentation":"

The identifier of the serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only.

" }, "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis only.

" + "documentation":"

The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only.

" }, "SnapshotType":{ "shape":"String", - "documentation":"

The type of snapshot that is being described. Available for Redis only.

" + "documentation":"

The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only.

" }, "NextToken":{ "shape":"String", - "documentation":"

An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis only.

" + "documentation":"

An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" }, "MaxResults":{ "shape":"IntegerOptional", - "documentation":"

The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis only.The default is 50. The Validation Constraints are a maximum of 50.

" + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified max-results value, a marker is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50.

" } } }, @@ -4012,11 +4014,11 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis only.

" + "documentation":"

An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" }, "ServerlessCacheSnapshots":{ "shape":"ServerlessCacheSnapshotList", - "documentation":"

The serverless caches snapshots associated with a given description request. Available for Redis only.

" + "documentation":"

The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -4136,7 +4138,7 @@ }, "Engine":{ "shape":"String", - "documentation":"

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "documentation":"

The ElastiCache engine to which the update applies. Either Redis OSS or Memcached.

" }, "ServiceUpdateStatus":{ "shape":"ServiceUpdateStatusList", @@ -4190,7 +4192,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. >

" + "documentation":"

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" } } }, @@ -4199,7 +4201,7 @@ "members":{ "Engine":{ "shape":"EngineType", - "documentation":"

The Redis engine.

" + "documentation":"

The Redis OSS engine.

" }, "UserId":{ "shape":"UserId", @@ -4426,11 +4428,11 @@ "members":{ "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The identifier of the serverless cache snapshot to be exported to S3. Available for Redis only.

" + "documentation":"

The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only.

" }, "S3BucketName":{ "shape":"String", - "documentation":"

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis only.

" + "documentation":"

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis OSS only.

" } } }, @@ -4439,7 +4441,7 @@ "members":{ "ServerlessCacheSnapshot":{ "shape":"ServerlessCacheSnapshot", - "documentation":"

The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis only.

" + "documentation":"

The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" } } }, @@ -4555,11 +4557,11 @@ }, "Engine":{ "shape":"String", - "documentation":"

The Elasticache engine. For Redis only.

" + "documentation":"

The ElastiCache engine. For Redis OSS only.

" }, "EngineVersion":{ "shape":"String", - "documentation":"

The Elasticache Redis engine version.

" + "documentation":"

The ElastiCache (Redis OSS) engine version.

" }, "Members":{ "shape":"GlobalReplicationGroupMemberList", @@ -4575,15 +4577,15 @@ }, "AuthTokenEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

" + "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis OSS commands.

Default: false

" }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

" + "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

" + "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

" }, "ARN":{ "shape":"String", @@ -4717,11 +4719,11 @@ }, "NewReplicaCount":{ "shape":"IntegerOptional", - "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

" + "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

" }, "ReplicaConfiguration":{ "shape":"ReplicaConfigurationList", - "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" + "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" }, "ApplyImmediately":{ "shape":"Boolean", @@ -4891,7 +4893,7 @@ "type":"structure", "members":{ }, - "documentation":"

The state of the serverless cache snapshot was not received. Available for Redis only.

", + "documentation":"

The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only.

", "error":{ "code":"InvalidServerlessCacheSnapshotStateFault", "httpStatusCode":400, @@ -5121,7 +5123,7 @@ }, "NumCacheNodes":{ "shape":"IntegerOptional", - "documentation":"

The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled.

If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately).

A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster.

" + "documentation":"

The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled.

If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately).

A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster.

" }, "CacheNodeIdsToRemove":{ "shape":"CacheNodeIdsList", @@ -5169,7 +5171,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -5189,7 +5191,7 @@ }, "AuthTokenUpdateStrategy":{ "shape":"AuthTokenUpdateStrategyType", - "documentation":"

Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values:

  • Rotate

  • Set

For more information, see Authenticating Users with Redis AUTH

" + "documentation":"

Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values:

  • ROTATE - default, if no update strategy is provided

  • SET - allowed only after ROTATE

  • DELETE - allowed only when transitioning to RBAC

For more information, see Authenticating Users with Redis OSS AUTH

" }, "LogDeliveryConfigurations":{ "shape":"LogDeliveryConfigurationRequestList", @@ -5197,7 +5199,7 @@ }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "documentation":"

Represents the input of a ModifyCacheCluster operation.

" @@ -5312,7 +5314,7 @@ }, "SnapshottingClusterId":{ "shape":"String", - "documentation":"

The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.

" + "documentation":"

The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.

" }, "AutomaticFailoverEnabled":{ "shape":"BooleanOptional", @@ -5361,7 +5363,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -5381,7 +5383,7 @@ }, "AuthTokenUpdateStrategy":{ "shape":"AuthTokenUpdateStrategyType", - "documentation":"

Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values:

  • Rotate

  • Set

For more information, see Authenticating Users with Redis AUTH

" + "documentation":"

Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values:

  • ROTATE - default, if no update strategy is provided

  • SET - allowed only after ROTATE

  • DELETE - allowed only when transitioning to RBAC

For more information, see Authenticating Users with Redis OSS AUTH

" }, "UserGroupIdsToAdd":{ "shape":"UserGroupIdList", @@ -5401,7 +5403,7 @@ }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", @@ -5409,11 +5411,11 @@ }, "TransitEncryptionMode":{ "shape":"TransitEncryptionMode", - "documentation":"

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can set the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

" + "documentation":"

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

" }, "ClusterMode":{ "shape":"ClusterMode", - "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" + "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" } }, "documentation":"

Represents the input of a ModifyReplicationGroups operation.

" @@ -5434,7 +5436,7 @@ "members":{ "ReplicationGroupId":{ "shape":"String", - "documentation":"

The name of the Redis (cluster mode enabled) cluster (replication group) on which the shards are to be configured.

" + "documentation":"

The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured.

" }, "NodeGroupCount":{ "shape":"Integer", @@ -5450,11 +5452,11 @@ }, "NodeGroupsToRemove":{ "shape":"NodeGroupsToRemoveList", - "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.

ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster.

" + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.

ElastiCache (Redis OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster.

" }, "NodeGroupsToRetain":{ "shape":"NodeGroupsToRetainList", - "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster.

ElastiCache for Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster.

" + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster.

ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster.

" } }, "documentation":"

Represents the input for a ModifyReplicationGroupShardConfiguration operation.

" @@ -5483,11 +5485,11 @@ }, "RemoveUserGroup":{ "shape":"BooleanOptional", - "documentation":"

The identifier of the UserGroup to be removed from association with the Redis serverless cache. Available for Redis only. Default is NULL.

" + "documentation":"

The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL.

" }, "UserGroupId":{ "shape":"String", - "documentation":"

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL - the existing UserGroup is not removed.

" + "documentation":"

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not removed.

" }, "SecurityGroupIds":{ "shape":"SecurityGroupIdsList", @@ -5495,11 +5497,11 @@ }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", - "documentation":"

The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days.

" + "documentation":"

The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days.

" }, "DailySnapshotTime":{ "shape":"String", - "documentation":"

The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" + "documentation":"

The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" } } }, @@ -5596,7 +5598,7 @@ "members":{ "NodeGroupId":{ "shape":"String", - "documentation":"

The identifier for the node group (shard). A Redis (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group.

" + "documentation":"

The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group.

" }, "Status":{ "shape":"String", @@ -5626,7 +5628,7 @@ "members":{ "NodeGroupId":{ "shape":"AllowedNodeGroupId", - "documentation":"

Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" + "documentation":"

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" }, "Slots":{ "shape":"String", @@ -5682,7 +5684,7 @@ }, "ReadEndpoint":{ "shape":"Endpoint", - "documentation":"

The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis (cluster mode disabled) clusters.

" + "documentation":"

The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters.

" }, "PreferredAvailabilityZone":{ "shape":"String", @@ -5694,7 +5696,7 @@ }, "CurrentRole":{ "shape":"String", - "documentation":"

The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis (cluster mode disabled) replication groups.

" + "documentation":"

The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis OSS (cluster mode disabled) replication groups.

" } }, "documentation":"

Represents a single node within a node group (shard).

" @@ -6044,7 +6046,7 @@ "members":{ "NumCacheNodes":{ "shape":"IntegerOptional", - "documentation":"

The new number of cache nodes for the cluster.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" + "documentation":"

The new number of cache nodes for the cluster.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" }, "CacheNodeIdsToRemove":{ "shape":"CacheNodeIdsList", @@ -6108,7 +6110,7 @@ }, "UpdateActionStatus":{ "shape":"UpdateActionStatus", - "documentation":"

The status of the update action on the Redis cluster

" + "documentation":"

The status of the update action on the Redis OSS cluster

" } }, "documentation":"

Update action that has been processed for the corresponding apply/stop request

" @@ -6306,7 +6308,7 @@ }, "NodeGroups":{ "shape":"NodeGroupList", - "documentation":"

A list of node groups in this replication group. For Redis (cluster mode disabled) replication groups, this is a single-element list. For Redis (cluster mode enabled) replication groups, the list contains an entry for each node group (shard).

" + "documentation":"

A list of node groups in this replication group. For Redis OSS (cluster mode disabled) replication groups, this is a single-element list. For Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard).

" }, "SnapshottingClusterId":{ "shape":"String", @@ -6314,7 +6316,7 @@ }, "AutomaticFailover":{ "shape":"AutomaticFailoverStatus", - "documentation":"

Indicates the status of automatic failover for this Redis replication group.

" + "documentation":"

Indicates the status of automatic failover for this Redis OSS replication group.

" }, "MultiAZ":{ "shape":"MultiAZStatus", @@ -6342,7 +6344,7 @@ }, "AuthTokenEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

" + "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis OSS commands.

Default: false

" }, "AuthTokenLastModifiedDate":{ "shape":"TStamp", @@ -6350,11 +6352,11 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + "documentation":"

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later.

Default: false

" }, "MemberClustersOutpostArns":{ "shape":"ReplicationGroupOutpostArnList", @@ -6386,15 +6388,15 @@ }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" + "documentation":"

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" }, "NetworkType":{ "shape":"NetworkType", - "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "IpDiscovery":{ "shape":"IpDiscovery", - "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" }, "TransitEncryptionMode":{ "shape":"TransitEncryptionMode", @@ -6402,10 +6404,10 @@ }, "ClusterMode":{ "shape":"ClusterMode", - "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" + "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" } }, - "documentation":"

Contains all of the attributes of a specific Redis replication group.

", + "documentation":"

Contains all of the attributes of a specific Redis OSS replication group.

", "wrapper":true }, "ReplicationGroupAlreadyExistsFault":{ @@ -6498,7 +6500,7 @@ }, "AutomaticFailoverStatus":{ "shape":"PendingAutomaticFailoverStatus", - "documentation":"

Indicates the status of automatic failover for this Redis replication group.

" + "documentation":"

Indicates the status of automatic failover for this Redis OSS replication group.

" }, "Resharding":{ "shape":"ReshardingStatus", @@ -6526,10 +6528,10 @@ }, "ClusterMode":{ "shape":"ClusterMode", - "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" + "documentation":"

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" } }, - "documentation":"

The settings to be applied to the Redis replication group, either immediately or during the next maintenance window.

" + "documentation":"

The settings to be applied to the Redis OSS replication group, either immediately or during the next maintenance window.

" }, "ReservedCacheNode":{ "type":"structure", @@ -6544,7 +6546,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "StartTime":{ "shape":"TStamp", @@ -6656,7 +6658,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Duration":{ "shape":"Integer", @@ -6743,7 +6745,7 @@ "members":{ "NodeGroupId":{ "shape":"AllowedNodeGroupId", - "documentation":"

Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" + "documentation":"

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" }, "PreferredAvailabilityZones":{ "shape":"AvailabilityZonesList", @@ -6874,7 +6876,7 @@ }, "UserGroupId":{ "shape":"String", - "documentation":"

The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.

" + "documentation":"

The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" }, "SubnetIds":{ "shape":"SubnetIdsList", @@ -6882,11 +6884,11 @@ }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", - "documentation":"

The current setting for the number of serverless cache snapshots the system will retain. Available for Redis only.

" + "documentation":"

The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.

" }, "DailySnapshotTime":{ "shape":"String", - "documentation":"

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis only.

" + "documentation":"

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.

" } }, "documentation":"

The resource representing a serverless cache.

" @@ -6954,48 +6956,48 @@ "members":{ "ServerlessCacheSnapshotName":{ "shape":"String", - "documentation":"

The identifier of a serverless cache snapshot. Available for Redis only.

" + "documentation":"

The identifier of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "ARN":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis only.

" + "documentation":"

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis only.

" + "documentation":"

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "SnapshotType":{ "shape":"String", - "documentation":"

The type of snapshot of serverless cache. Available for Redis only.

" + "documentation":"

The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only.

" }, "Status":{ "shape":"String", - "documentation":"

The current status of the serverless cache. Available for Redis only.

" + "documentation":"

The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only.

" }, "CreateTime":{ "shape":"TStamp", - "documentation":"

The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis only.

" + "documentation":"

The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "ExpiryTime":{ "shape":"TStamp", - "documentation":"

The time that the serverless cache snapshot will expire. Available for Redis only.

" + "documentation":"

The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only.

" }, "BytesUsedForCache":{ "shape":"String", - "documentation":"

The total size of a serverless cache snapshot, in bytes. Available for Redis only.

" + "documentation":"

The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only.

" }, "ServerlessCacheConfiguration":{ "shape":"ServerlessCacheConfiguration", - "documentation":"

The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis only.

" + "documentation":"

The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only.

" } }, - "documentation":"

The resource representing a serverless cache snapshot. Available for Redis only.

" + "documentation":"

The resource representing a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" }, "ServerlessCacheSnapshotAlreadyExistsFault":{ "type":"structure", "members":{ }, - "documentation":"

A serverless cache snapshot with this name already exists. Available for Redis only.

", + "documentation":"

A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only.

", "error":{ "code":"ServerlessCacheSnapshotAlreadyExistsFault", "httpStatusCode":400, @@ -7014,7 +7016,7 @@ "type":"structure", "members":{ }, - "documentation":"

This serverless cache snapshot could not be found or does not exist. Available for Redis only.

", + "documentation":"

This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only.

", "error":{ "code":"ServerlessCacheSnapshotNotFoundFault", "httpStatusCode":404, @@ -7026,7 +7028,7 @@ "type":"structure", "members":{ }, - "documentation":"

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis only.

", + "documentation":"

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only.

", "error":{ "code":"ServerlessCacheSnapshotQuotaExceededFault", "httpStatusCode":400, @@ -7083,11 +7085,11 @@ }, "Engine":{ "shape":"String", - "documentation":"

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "documentation":"

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" }, "EngineVersion":{ "shape":"String", - "documentation":"

The Elasticache engine version to which the update applies. Either Redis or Memcached engine version

" + "documentation":"

The Elasticache engine version to which the update applies. Either Redis OSS or Memcached engine version.

" }, "AutoUpdateAfterRecommendedApplyByDate":{ "shape":"BooleanOptional", @@ -7098,7 +7100,7 @@ "documentation":"

The estimated length of time the service update will take

" } }, - "documentation":"

An update that you can apply to your Redis clusters.

" + "documentation":"

An update that you can apply to your Redis OSS clusters.

" }, "ServiceUpdateList":{ "type":"list", @@ -7205,7 +7207,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -7217,7 +7219,7 @@ }, "NumCacheNodes":{ "shape":"IntegerOptional", - "documentation":"

The number of cache nodes in the source cluster.

For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" + "documentation":"

The number of cache nodes in the source cluster.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" }, "PreferredAvailabilityZone":{ "shape":"String", @@ -7257,7 +7259,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -7273,7 +7275,7 @@ }, "AutomaticFailover":{ "shape":"AutomaticFailoverStatus", - "documentation":"

Indicates the status of automatic failover for the source Redis replication group.

" + "documentation":"

Indicates the status of automatic failover for the source Redis OSS replication group.

" }, "NodeSnapshots":{ "shape":"NodeSnapshotList", @@ -7292,7 +7294,7 @@ "documentation":"

Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.

" } }, - "documentation":"

Represents a copy of an entire Redis cluster as of the time when the snapshot was taken.

", + "documentation":"

Represents a copy of an entire Redis OSS cluster as of the time when the snapshot was taken.

", "wrapper":true }, "SnapshotAlreadyExistsFault":{ @@ -7318,7 +7320,7 @@ "type":"structure", "members":{ }, - "documentation":"

You attempted one of the following operations:

  • Creating a snapshot of a Redis cluster running on a cache.t1.micro cache node.

  • Creating a snapshot of a cluster that is running Memcached rather than Redis.

Neither of these are supported by ElastiCache.

", + "documentation":"

You attempted one of the following operations:

  • Creating a snapshot of a Redis OSS cluster running on a cache.t1.micro cache node.

  • Creating a snapshot of a cluster that is running Memcached rather than Redis OSS.

Neither of these are supported by ElastiCache.

", "error":{ "code":"SnapshotFeatureNotSupportedFault", "httpStatusCode":400, @@ -7384,7 +7386,7 @@ }, "CustomerNodeEndpointList":{ "shape":"CustomerNodeEndpointList", - "documentation":"

List of endpoints from which data should be migrated. For Redis (cluster mode disabled), list should have only one element.

" + "documentation":"

List of endpoints from which data should be migrated. For Redis OSS (cluster mode disabled), list should have only one element.

" } } }, @@ -7412,7 +7414,7 @@ }, "SupportedNetworkTypes":{ "shape":"NetworkTypeList", - "documentation":"

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "documentation":"

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "documentation":"

Represents the subnet associated with a cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with ElastiCache.

" @@ -7541,7 +7543,7 @@ }, "NodeGroupId":{ "shape":"AllowedNodeGroupId", - "documentation":"

The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 5 node groups in any rolling 24-hour period.

" + "documentation":"

The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 15 node groups in any rolling 24-hour period.

" } } }, @@ -7717,7 +7719,7 @@ }, "Engine":{ "shape":"String", - "documentation":"

The Elasticache engine to which the update applies. Either Redis or Memcached

" + "documentation":"

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" } }, "documentation":"

The status of the service update for a specific replication group

" @@ -7795,7 +7797,7 @@ }, "MinimumEngineVersion":{ "shape":"String", - "documentation":"

The minimum engine version required, which is Redis 6.0

" + "documentation":"

The minimum engine version required, which is Redis OSS 6.0

" }, "AccessString":{ "shape":"String", @@ -7840,7 +7842,7 @@ }, "Engine":{ "shape":"EngineType", - "documentation":"

The current supported value is Redis.

" + "documentation":"

The current supported value is Redis user.

" }, "UserIds":{ "shape":"UserIdList", @@ -7848,7 +7850,7 @@ }, "MinimumEngineVersion":{ "shape":"String", - "documentation":"

The minimum engine version required, which is Redis 6.0

" + "documentation":"

The minimum engine version required, which is Redis OSS 6.0

" }, "PendingChanges":{ "shape":"UserGroupPendingChanges", @@ -7860,7 +7862,7 @@ }, "ServerlessCaches":{ "shape":"UGServerlessCacheIdList", - "documentation":"

Indicates which serverless caches the specified user group is associated with. Available for Redis only.

" + "documentation":"

Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only.

" }, "ARN":{ "shape":"String", diff --git a/botocore/data/elasticbeanstalk/2010-12-01/endpoint-rule-set-1.json b/botocore/data/elasticbeanstalk/2010-12-01/endpoint-rule-set-1.json index b872ba2142..40b17bf83b 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/endpoint-rule-set-1.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is 
enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json index f2b2b94f74..a7953e98c5 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json @@ -4,12 +4,14 @@ "apiVersion":"2010-12-01", "endpointPrefix":"elasticbeanstalk", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"Elastic Beanstalk", "serviceFullName":"AWS Elastic Beanstalk", "serviceId":"Elastic Beanstalk", "signatureVersion":"v4", "uid":"elasticbeanstalk-2010-12-01", - "xmlNamespace":"http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/" + "xmlNamespace":"http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AbortEnvironmentUpdate":{ diff --git a/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json b/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json index 29f802e532..e6fea0075e 100644 --- a/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json +++ b/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/elastictranscoder/2012-09-25/service-2.json b/botocore/data/elastictranscoder/2012-09-25/service-2.json index 980f4d4dc4..e589644af1 100644 --- a/botocore/data/elastictranscoder/2012-09-25/service-2.json +++ b/botocore/data/elastictranscoder/2012-09-25/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2012-09-25", "endpointPrefix":"elastictranscoder", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Elastic Transcoder", "serviceId":"Elastic Transcoder", "signatureVersion":"v4", - "uid":"elastictranscoder-2012-09-25" + "uid":"elastictranscoder-2012-09-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelJob":{ diff --git a/botocore/data/elb/2012-06-01/endpoint-rule-set-1.json b/botocore/data/elb/2012-06-01/endpoint-rule-set-1.json index ed9f27248e..c840251e54 100644 --- 
a/botocore/data/elb/2012-06-01/endpoint-rule-set-1.json +++ b/botocore/data/elb/2012-06-01/endpoint-rule-set-1.json @@ -32,38 +32,83 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], - "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ { - "fn": "parseURL", + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -75,165 +120,109 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are 
not supported", - "type": "error" + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" }, { "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + ], + "type": "tree" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true ] } ], - "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": 
[ - { - "conditions": [], - "type": "tree", + ], "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -242,12 +231,13 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://elasticloadbalancing.{Region}.amazonaws.com", "properties": {}, "headers": {} }, @@ -262,79 +252,88 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } - ] + ], + "type": "tree" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], - "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + 
"endpoint": { + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ], + "type": "tree" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/elb/2012-06-01/service-2.json b/botocore/data/elb/2012-06-01/service-2.json index 0365e5a9a3..49ca04c2ef 100644 --- a/botocore/data/elb/2012-06-01/service-2.json +++ b/botocore/data/elb/2012-06-01/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2012-06-01", "endpointPrefix":"elasticloadbalancing", "protocol":"query", + "protocols":["query"], "serviceFullName":"Elastic Load Balancing", "serviceId":"Elastic Load Balancing", "signatureVersion":"v4", "uid":"elasticloadbalancing-2012-06-01", - "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/" + "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTags":{ diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 7181baebcc..6825fd5cf3 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -4,12 +4,14 @@ "apiVersion":"2015-12-01", "endpointPrefix":"elasticloadbalancing", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"Elastic Load Balancing v2", "serviceFullName":"Elastic Load Balancing", "serviceId":"Elastic Load Balancing v2", "signatureVersion":"v4", "uid":"elasticloadbalancingv2-2015-12-01", - "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/" + "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/", + 
"auth":["aws.auth#sigv4"] }, "operations":{ "AddListenerCertificates":{ @@ -256,6 +258,24 @@ ], "documentation":"

Deletes the specified rule.

You can't delete the default rule.

" }, + "DeleteSharedTrustStoreAssociation":{ + "name":"DeleteSharedTrustStoreAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSharedTrustStoreAssociationInput"}, + "output":{ + "shape":"DeleteSharedTrustStoreAssociationOutput", + "resultWrapper":"DeleteSharedTrustStoreAssociationResult" + }, + "errors":[ + {"shape":"TrustStoreNotFoundException"}, + {"shape":"DeleteAssociationSameAccountException"}, + {"shape":"TrustStoreAssociationNotFoundException"} + ], + "documentation":"

Deletes a shared trust store association.

" + }, "DeleteTargetGroup":{ "name":"DeleteTargetGroup", "http":{ @@ -521,7 +541,7 @@ {"shape":"TrustStoreNotFoundException"}, {"shape":"RevocationIdNotFoundException"} ], - "documentation":"

Describes the revocation files in use by the specified trust store arn, or revocation ID.

" + "documentation":"

Describes the revocation files in use by the specified trust store or revocation files.

" }, "DescribeTrustStores":{ "name":"DescribeTrustStores", @@ -537,7 +557,23 @@ "errors":[ {"shape":"TrustStoreNotFoundException"} ], - "documentation":"

Describes all trust stores for a given account by trust store arn’s or name.

" + "documentation":"

Describes all trust stores for the specified account.

" + }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourcePolicyInput"}, + "output":{ + "shape":"GetResourcePolicyOutput", + "resultWrapper":"GetResourcePolicyResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the resource policy for a specified resource.

" }, "GetTrustStoreCaCertificatesBundle":{ "name":"GetTrustStoreCaCertificatesBundle", @@ -699,7 +735,7 @@ {"shape":"InvalidCaCertificatesBundleException"}, {"shape":"CaCertificatesBundleNotFoundException"} ], - "documentation":"

Update the ca certificate bundle for a given trust store.

" + "documentation":"

Update the ca certificate bundle for the specified trust store.

" }, "RegisterTargets":{ "name":"RegisterTargets", @@ -1376,7 +1412,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

Note: Internal load balancers must use the ipv4 IP address type.

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" }, "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", @@ -1563,6 +1599,18 @@ }, "DNSName":{"type":"string"}, "Default":{"type":"boolean"}, + "DeleteAssociationSameAccountException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified association cannot be within the same account.

", + "error":{ + "code":"DeleteAssociationSameAccount", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "DeleteListenerInput":{ "type":"structure", "required":["ListenerArn"], @@ -1608,6 +1656,28 @@ "members":{ } }, + "DeleteSharedTrustStoreAssociationInput":{ + "type":"structure", + "required":[ + "TrustStoreArn", + "ResourceArn" + ], + "members":{ + "TrustStoreArn":{ + "shape":"TrustStoreArn", + "documentation":"

The Amazon Resource Name (ARN) of the trust store.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + } + } + }, + "DeleteSharedTrustStoreAssociationOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteTargetGroupInput":{ "type":"structure", "required":["TargetGroupArn"], @@ -1962,7 +2032,7 @@ }, "Include":{ "shape":"ListOfDescribeTargetHealthIncludeOptions", - "documentation":"

Used to inclue anomaly detection information.

" + "documentation":"

Used to include anomaly detection information.

" } } }, @@ -2224,6 +2294,25 @@ }, "documentation":"

Information about a forward action.

" }, + "GetResourcePolicyInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + } + } + }, + "GetResourcePolicyOutput":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"Policy", + "documentation":"

The content of the resource policy.

" + } + } + }, "GetTrustStoreCaCertificatesBundleInput":{ "type":"structure", "required":["TrustStoreArn"], @@ -2449,7 +2538,8 @@ "type":"string", "enum":[ "ipv4", - "dualstack" + "dualstack", + "dualstack-without-public-ipv4" ] }, "IsDefault":{"type":"boolean"}, @@ -2591,7 +2681,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

[Application Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers and Gateway Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" }, "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", @@ -2977,6 +3067,10 @@ "IgnoreClientCertificateExpiry":{ "shape":"IgnoreClientCertificateExpiry", "documentation":"

Indicates whether expired client certificates are ignored.

" + }, + "TrustStoreAssociationStatus":{ + "shape":"TrustStoreAssociationStatusEnum", + "documentation":"

Indicates the status of a shared trust store association.

" } }, "documentation":"

Information about the mutual authentication attributes of a listener.

" @@ -3017,6 +3111,10 @@ }, "documentation":"

Information about a path pattern condition.

" }, + "Policy":{ + "type":"string", + "min":1 + }, "Port":{ "type":"integer", "max":65535, @@ -3239,6 +3337,18 @@ }, "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified resource does not exist.

", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "RevocationContent":{ "type":"structure", "members":{ @@ -3443,7 +3553,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

" + "documentation":"

Note: Internal load balancers must use the ipv4 IP address type.

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.

[Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" } } }, @@ -3527,7 +3637,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" } } }, @@ -3540,7 +3650,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Network Load Balancers] The IP address type.

[Gateway Load Balancers] The IP address type.

" + "documentation":"

[Application Load Balancers] The IP address type.

[Network Load Balancers] The IP address type.

[Gateway Load Balancers] The IP address type.

" } } }, @@ -4164,7 +4274,26 @@ }, "documentation":"

Information about the resources a trust store is associated with.

" }, + "TrustStoreAssociationNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified association does not exist.

", + "error":{ + "code":"AssociationNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "TrustStoreAssociationResourceArn":{"type":"string"}, + "TrustStoreAssociationStatusEnum":{ + "type":"string", + "enum":[ + "active", + "removed" + ] + }, "TrustStoreAssociations":{ "type":"list", "member":{"shape":"TrustStoreAssociation"} diff --git a/botocore/data/emr-serverless/2021-07-13/paginators-1.json b/botocore/data/emr-serverless/2021-07-13/paginators-1.json index 7193d8550a..aa3966b17b 100644 --- a/botocore/data/emr-serverless/2021-07-13/paginators-1.json +++ b/botocore/data/emr-serverless/2021-07-13/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "jobRuns" + }, + "ListJobRunAttempts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobRunAttempts" } } } diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index 6eeb903eb7..d5e5c73744 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -127,6 +127,22 @@ ], "documentation":"

Lists applications based on a set of parameters.

" }, + "ListJobRunAttempts":{ + "name":"ListJobRunAttempts", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/jobruns/{jobRunId}/attempts", + "responseCode":200 + }, + "input":{"shape":"ListJobRunAttemptsRequest"}, + "output":{"shape":"ListJobRunAttemptsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all attempts of a job run.

" + }, "ListJobRuns":{ "name":"ListJobRuns", "http":{ @@ -350,7 +366,11 @@ "shape":"ConfigurationList", "documentation":"

The Configuration specifications of an application. Each configuration consists of a classification and properties. You use this parameter when creating or updating an application. To see the runtimeConfiguration object of an application, run the GetApplication API operation.

" }, - "monitoringConfiguration":{"shape":"MonitoringConfiguration"} + "monitoringConfiguration":{"shape":"MonitoringConfiguration"}, + "interactiveConfiguration":{ + "shape":"InteractiveConfiguration", + "documentation":"

The interactive configuration object that enables the interactive use cases for an application.

" + } }, "documentation":"

Information about an application. Amazon EMR Serverless uses applications to run jobs.

" }, @@ -456,6 +476,11 @@ "X86_64" ] }, + "AttemptNumber":{ + "type":"integer", + "box":true, + "min":1 + }, "AutoStartConfig":{ "type":"structure", "members":{ @@ -699,6 +724,10 @@ "monitoringConfiguration":{ "shape":"MonitoringConfiguration", "documentation":"

The configuration setting for monitoring.

" + }, + "interactiveConfiguration":{ + "shape":"InteractiveConfiguration", + "documentation":"

The interactive configuration object that enables the interactive use cases to use when running an application.

" } } }, @@ -829,6 +858,12 @@ "documentation":"

The ID of the job run.

", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

An optional parameter that indicates the number of attempts for the job. If not specified, this value defaults to the attempt of the latest job.

", + "location":"querystring", + "locationName":"attempt" } } }, @@ -859,6 +894,12 @@ "documentation":"

The ID of the job run.

", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

An optional parameter that indicates the number of attempts for the job. If not specified, this value defaults to the attempt of the latest job.

", + "location":"querystring", + "locationName":"attempt" } } }, @@ -972,6 +1013,20 @@ "type":"integer", "box":true }, + "InteractiveConfiguration":{ + "type":"structure", + "members":{ + "studioEnabled":{ + "shape":"Boolean", + "documentation":"

Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook.

" + }, + "livyEndpointEnabled":{ + "shape":"Boolean", + "documentation":"

Enables an Apache Livy endpoint that you can connect to and run interactive jobs.

" + } + }, + "documentation":"

The configuration to use to enable the different types of interactive use cases in an application.

" + }, "InternalServerException":{ "type":"structure", "required":["message"], @@ -1093,16 +1148,126 @@ "billedResourceUtilization":{ "shape":"ResourceUtilization", "documentation":"

The aggregate vCPU, memory, and storage that Amazon Web Services has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. Note that billed resources do not include usage for idle pre-initialized workers.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run.

" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"

The retry policy of the job run.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt of the job run.

" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run attempt was created.

" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run attempt was last updated.

" } }, "documentation":"

Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.

" }, + "JobRunAttemptSummary":{ + "type":"structure", + "required":[ + "applicationId", + "id", + "arn", + "createdBy", + "jobCreatedAt", + "createdAt", + "updatedAt", + "executionRole", + "state", + "stateDetails", + "releaseLabel" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

The ID of the application the job is running on.

" + }, + "id":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run attempt.

" + }, + "name":{ + "shape":"String256", + "documentation":"

The name of the job run attempt.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run attempt.

" + }, + "arn":{ + "shape":"JobArn", + "documentation":"

The Amazon Resource Name (ARN) of the job run.

" + }, + "createdBy":{ + "shape":"RequestIdentityUserArn", + "documentation":"

The user who created the job run.

" + }, + "jobCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run was created.

" + }, + "createdAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was created.

" + }, + "updatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run attempt was last updated.

" + }, + "executionRole":{ + "shape":"IAMRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the execution role of the job run.

" + }, + "state":{ + "shape":"JobRunState", + "documentation":"

The state of the job run attempt.

" + }, + "stateDetails":{ + "shape":"String256", + "documentation":"

The state details of the job run attempt.

" + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

The Amazon EMR release label of the job run attempt.

" + }, + "type":{ + "shape":"JobRunType", + "documentation":"

The type of the job run, such as Spark or Hive.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt number of the job run execution.

" + } + }, + "documentation":"

The summary of attributes associated with a job run attempt.

" + }, + "JobRunAttempts":{ + "type":"list", + "member":{"shape":"JobRunAttemptSummary"} + }, "JobRunId":{ "type":"string", "max":64, "min":1, "pattern":"[0-9a-z]+" }, + "JobRunMode":{ + "type":"string", + "enum":[ + "BATCH", + "STREAMING" + ] + }, "JobRunState":{ "type":"string", "enum":[ @@ -1149,6 +1314,10 @@ "shape":"String256", "documentation":"

The optional job run name. This doesn't have to be unique.

" }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run.

" + }, "arn":{ "shape":"JobArn", "documentation":"

The ARN of the job run.

" @@ -1184,6 +1353,18 @@ "type":{ "shape":"JobRunType", "documentation":"

The type of job run, such as Spark or Hive.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt number of the job run execution.

" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run attempt was created.

" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"

The date and time of when the job run attempt was last updated.

" } }, "documentation":"

The summary of attributes associated with a job run.

" @@ -1236,6 +1417,59 @@ } } }, + "ListJobRunAttemptsRequest":{ + "type":"structure", + "required":[ + "applicationId", + "jobRunId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

The ID of the application for which to list job runs.

", + "location":"uri", + "locationName":"applicationId" + }, + "jobRunId":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run to list.

", + "location":"uri", + "locationName":"jobRunId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of job run attempt results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListJobRunAttemptsRequestMaxResultsInteger", + "documentation":"

The maximum number of job run attempts to list.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListJobRunAttemptsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "ListJobRunAttemptsResponse":{ + "type":"structure", + "required":["jobRunAttempts"], + "members":{ + "jobRunAttempts":{ + "shape":"JobRunAttempts", + "documentation":"

The array of the listed job run attempt objects.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The output displays the token for the next set of application results. This is required for pagination and is available as a response of the previous request.

" + } + } + }, "ListJobRunsRequest":{ "type":"structure", "required":["applicationId"], @@ -1275,6 +1509,12 @@ "documentation":"

An optional filter for job run states. Note that if this filter contains multiple states, the resulting list will be grouped by the state.

", "location":"querystring", "locationName":"states" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job runs to list.

", + "location":"querystring", + "locationName":"mode" } } }, @@ -1507,6 +1747,25 @@ }, "documentation":"

The resource utilization for memory, storage, and vCPU for jobs.

" }, + "RetryPolicy":{ + "type":"structure", + "members":{ + "maxAttempts":{ + "shape":"AttemptNumber", + "documentation":"

Maximum number of attempts for the job run. This parameter is only applicable for BATCH mode.

" + }, + "maxFailedAttemptsPerHour":{ + "shape":"RetryPolicyMaxFailedAttemptsPerHourInteger", + "documentation":"

Maximum number of failed attempts per hour. This parameter is only applicable for STREAMING mode.

" + } + }, + "documentation":"

The retry policy to use for a job run.

" + }, + "RetryPolicyMaxFailedAttemptsPerHourInteger":{ + "type":"integer", + "box":true, + "min":1 + }, "S3MonitoringConfiguration":{ "type":"structure", "members":{ @@ -1640,6 +1899,14 @@ "name":{ "shape":"String256", "documentation":"

The optional job run name. This doesn't have to be unique.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run when it starts.

" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"

The retry policy when job run starts.

" } } }, @@ -1846,6 +2113,10 @@ "shape":"WorkerTypeSpecificationInputMap", "documentation":"

The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types.

" }, + "interactiveConfiguration":{ + "shape":"InteractiveConfiguration", + "documentation":"

The interactive configuration object that contains new interactive use cases when the application is updated.

" + }, "releaseLabel":{ "shape":"ReleaseLabel", "documentation":"

The Amazon EMR release label for the application. You can change the release label to use a different release of Amazon EMR.

" diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 74f1654142..13c67608d3 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"elasticmapreduce", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon EMR", "serviceFullName":"Amazon EMR", "serviceId":"EMR", "signatureVersion":"v4", "targetPrefix":"ElasticMapReduce", - "uid":"elasticmapreduce-2009-03-31" + "uid":"elasticmapreduce-2009-03-31", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddInstanceFleet":{ @@ -3123,6 +3125,10 @@ "CustomAmiId":{ "shape":"XmlStringMaxLen256", "documentation":"

The custom AMI ID to use for the instance type.

" + }, + "Priority":{ + "shape":"NonNegativeDouble", + "documentation":"

The priority at which Amazon EMR launches the Amazon EC2 instances with this instance type. Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first.

" } }, "documentation":"

An instance type configuration for each instance type in an instance fleet, which determines the Amazon EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

" @@ -3165,6 +3171,10 @@ "CustomAmiId":{ "shape":"XmlStringMaxLen256", "documentation":"

The custom AMI ID to use for the instance type.

" + }, + "Priority":{ + "shape":"NonNegativeDouble", + "documentation":"

The priority at which Amazon EMR launches the Amazon EC2 instances with this instance type. Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first.

" } }, "documentation":"

The configuration specification for each instance type in an instance fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

" @@ -4217,7 +4227,10 @@ }, "OnDemandProvisioningAllocationStrategy":{ "type":"string", - "enum":["lowest-price"] + "enum":[ + "lowest-price", + "prioritized" + ] }, "OnDemandProvisioningSpecification":{ "type":"structure", @@ -4225,7 +4238,7 @@ "members":{ "AllocationStrategy":{ "shape":"OnDemandProvisioningAllocationStrategy", - "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Available options are lowest-price and prioritized. lowest-price specifies to launch the instances with the lowest price first, and prioritized specifies that Amazon EMR should launch the instances with the highest priority first. The default is lowest-price.

" }, "CapacityReservationOptions":{ "shape":"OnDemandCapacityReservationOptions", @@ -5011,7 +5024,8 @@ "capacity-optimized", "price-capacity-optimized", "lowest-price", - "diversified" + "diversified", + "capacity-optimized-prioritized" ] }, "SpotProvisioningSpecification":{ @@ -5035,7 +5049,7 @@ }, "AllocationStrategy":{ "shape":"SpotProvisioningAllocationStrategy", - "documentation":"

Specifies one of the following strategies to launch Spot Instance fleets: price-capacity-optimized, capacity-optimized, lowest-price, or diversified. For more information on the provisioning strategies, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide for Linux Instances.

When you launch a Spot Instance fleet with the old console, it automatically launches with the capacity-optimized strategy. You can't change the allocation strategy from the old console.

" + "documentation":"

Specifies one of the following strategies to launch Spot Instance fleets: capacity-optimized, price-capacity-optimized, lowest-price, or diversified, and capacity-optimized-prioritized. For more information on the provisioning strategies, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide for Linux Instances.

When you launch a Spot Instance fleet with the old console, it automatically launches with the capacity-optimized strategy. You can't change the allocation strategy from the old console.

" } }, "documentation":"

The launch specification for Spot Instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. Spot Instance allocation strategy is available in Amazon EMR releases 5.12.1 and later.

Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022.

" diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 543def11af..4c24f2afa5 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -112,11 +112,6 @@ } }, "services" : { - "a4b" : { - "endpoints" : { - "us-east-1" : { } - } - }, "access-analyzer" : { "endpoints" : { "af-south-1" : { }, @@ -454,20 +449,34 @@ }, "airflow" : { "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, + "me-central-1" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -547,10 +556,12 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1629,6 +1640,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -1638,6 +1650,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2115,12 +2128,24 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "athena-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" 
: [ { + "hostname" : "athena-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "athena.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -2173,6 +2198,20 @@ "tags" : [ "dualstack" ] } ] }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2534,37 +2573,6 @@ "us-west-2" : { } } }, - "backupstorage" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "batch" : { "defaults" : { "variants" : [ { @@ -2682,6 +2690,12 @@ }, "hostname" : "bedrock.ap-southeast-2.amazonaws.com" }, + "bedrock-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock.ca-central-1.amazonaws.com" + }, "bedrock-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -2694,12 +2708,24 @@ }, "hostname" : "bedrock.eu-west-1.amazonaws.com" }, + "bedrock-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "bedrock.eu-west-2.amazonaws.com" + }, "bedrock-eu-west-3" : { 
"credentialScope" : { "region" : "eu-west-3" }, "hostname" : "bedrock.eu-west-3.amazonaws.com" }, + "bedrock-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-fips.ca-central-1.amazonaws.com" + }, "bedrock-fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2736,6 +2762,12 @@ }, "hostname" : "bedrock-runtime.ap-southeast-2.amazonaws.com" }, + "bedrock-runtime-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-runtime.ca-central-1.amazonaws.com" + }, "bedrock-runtime-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -2748,12 +2780,24 @@ }, "hostname" : "bedrock-runtime.eu-west-1.amazonaws.com" }, + "bedrock-runtime-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "bedrock-runtime.eu-west-2.amazonaws.com" + }, "bedrock-runtime-eu-west-3" : { "credentialScope" : { "region" : "eu-west-3" }, "hostname" : "bedrock-runtime.eu-west-3.amazonaws.com" }, + "bedrock-runtime-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-runtime-fips.ca-central-1.amazonaws.com" + }, "bedrock-runtime-fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2766,6 +2810,12 @@ }, "hostname" : "bedrock-runtime-fips.us-west-2.amazonaws.com" }, + "bedrock-runtime-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "bedrock-runtime.sa-east-1.amazonaws.com" + }, "bedrock-runtime-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2778,6 +2828,12 @@ }, "hostname" : "bedrock-runtime.us-west-2.amazonaws.com" }, + "bedrock-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "bedrock.sa-east-1.amazonaws.com" + }, "bedrock-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2790,9 +2846,12 @@ }, "hostname" : "bedrock.us-west-2.amazonaws.com" }, + "ca-central-1" : { }, "eu-central-1" : 
{ }, "eu-west-1" : { }, + "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -2811,6 +2870,7 @@ }, "braket" : { "endpoints" : { + "eu-north-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, "us-west-1" : { }, @@ -2831,6 +2891,8 @@ }, "cases" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -2950,131 +3012,435 @@ }, "cloud9" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, - "cloudcontrolapi" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { - "hostname" : 
"cloudcontrolapi-fips.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, - "ca-west-1" : { + "eu-central-1" : { "variants" : [ { - "hostname" : "cloudcontrolapi-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" }, "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com" - }, - "fips-ca-west-1" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.ca-west-1.amazonaws.com" + "hostname" : "cloud9-fips.ca-central-1.amazonaws.com" }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" }, "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com" + "hostname" : "cloud9-fips.us-east-1.amazonaws.com" }, "fips-us-east-2" : { "credentialScope" : { "region" : "us-east-2" }, "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com" + "hostname" : "cloud9-fips.us-east-2.amazonaws.com" }, "fips-us-west-1" : { "credentialScope" : { "region" : "us-west-1" }, "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com" + "hostname" : "cloud9-fips.us-west-1.amazonaws.com" 
}, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" }, "deprecated" : true, - "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" + "hostname" : "cloud9-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { + "il-central-1" : { "variants" : [ { - "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] + "tags" : [ "dualstack" ] } ] }, - "us-east-2" : { + "me-south-1" : { "variants" : [ { - "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + } + } + }, + "cloudcontrolapi" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-east-1.api.aws", + "tags" : 
[ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-central-1.api.aws", + "tags" : [ 
"dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" + }, + "il-central-1" : { + "variants" : [ { + "hostname" : 
"cloudcontrolapi.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -3800,6 +4166,7 @@ "cognito-identity" : { "endpoints" : { "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3810,6 +4177,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, 
"eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -3879,6 +4247,7 @@ "cognito-idp" : { "endpoints" : { "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3889,6 +4258,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5025,12 +5395,6 @@ } ] }, "endpoints" : { - "af-south-1" : { - "hostname" : "datazone.af-south-1.api.aws" - }, - "ap-east-1" : { - "hostname" : "datazone.ap-east-1.api.aws" - }, "ap-northeast-1" : { "hostname" : "datazone.ap-northeast-1.api.aws" }, @@ -5040,9 +5404,6 @@ "ap-northeast-3" : { "hostname" : "datazone.ap-northeast-3.api.aws" }, - "ap-south-1" : { - "hostname" : "datazone.ap-south-1.api.aws" - }, "ap-south-2" : { "hostname" : "datazone.ap-south-2.api.aws" }, @@ -5071,18 +5432,12 @@ "eu-central-1" : { "hostname" : "datazone.eu-central-1.api.aws" }, - "eu-central-2" : { - "hostname" : "datazone.eu-central-2.api.aws" - }, "eu-north-1" : { "hostname" : "datazone.eu-north-1.api.aws" }, "eu-south-1" : { "hostname" : "datazone.eu-south-1.api.aws" }, - "eu-south-2" : { - "hostname" : "datazone.eu-south-2.api.aws" - }, "eu-west-1" : { "hostname" : "datazone.eu-west-1.api.aws" }, @@ -5137,6 +5492,8 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -5248,8 +5605,18 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5258,6 +5625,20 @@ 
"eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -7220,6 +7601,7 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-south-2" : { }, @@ -8553,6 +8935,16 @@ } } }, + "globalaccelerator" : { + "endpoints" : { + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "globalaccelerator-fips.us-west-2.amazonaws.com" + } + } + }, "glue" : { "endpoints" : { "af-south-1" : { }, @@ -8947,11 +9339,6 @@ "us-west-2" : { } } }, - "honeycode" : { - "endpoints" : { - "us-west-2" : { } - } - }, "iam" : { "endpoints" : { "aws-global" : { @@ -9024,6 +9411,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -10179,9 +10567,21 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kendra-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-west-1" : { }, "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kendra-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -10426,8 +10826,18 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + 
}, + "ca-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -10436,14 +10846,76 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com" + }, "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + 
} } }, "kinesisvideo" : { @@ -12131,12 +12603,25 @@ }, "meetings-chime" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.ca-central-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-west-2" : { }, "il-central-1" : { }, @@ -12560,6 +13045,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -12766,6 +13252,7 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -12984,6 +13471,12 @@ }, "hostname" : "oidc.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "hostname" : "oidc.ca-west-1.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -13338,66 +13831,318 @@ }, "us-east-1" : { "variants" : [ { - "hostname" : "participant.connect-fips.us-east-1.amazonaws.com", + "hostname" : "participant.connect-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "participant.connect-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "personalize" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "pi" : { + "endpoints" : { + "af-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + 
"hostname" : "pi.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : 
"pi-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : 
"pi-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-west-2.amazonaws.com" + }, + "il-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { + "protocols" : [ "https" ], "variants" : [ { - "hostname" : "participant.connect-fips.us-west-2.amazonaws.com", + "hostname" : 
"pi-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } }, - "personalize" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } - } - }, - "pi" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "pinpoint" : { "defaults" : { "credentialScope" : { @@ -13659,6 +14404,12 @@ }, "hostname" : "portal.sso.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "hostname" : "portal.sso.ca-west-1.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -14004,14 +14755,19 @@ }, "quicksight" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "api" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14624,7 +15380,9 @@ } ] 
}, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14663,6 +15421,8 @@ "deprecated" : true, "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com" }, + "me-central-1" : { }, + "sa-east-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com", @@ -15795,6 +16555,31 @@ "hostname" : "s3-control-fips.ca-central-1.amazonaws.com", "signatureVersions" : [ "s3v4" ] }, + "ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "hostname" : "s3-control.ca-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control.dualstack.ca-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.ca-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -17530,6 +18315,7 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -18357,6 +19143,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -18482,6 +19269,19 @@ "deprecated" : true, "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.ca-west-1.amazonaws.com" + }, 
"eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -18895,22 +19695,80 @@ "us-west-2" : { } } }, + "tax" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "tax.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, "textract" : { "endpoints" : { - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "textract.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "textract.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "textract.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "textract.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "textract-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "textract.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "textract.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "textract.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "textract.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -18950,24 +19808,48 @@ "variants" : [ { "hostname" : 
"textract-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "textract-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "textract-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "textract-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -19321,7 +20203,19 @@ "deprecated" : true, "hostname" : "translate-fips.us-east-2.amazonaws.com" }, - "us-west-1" : { }, + "us-west-1" : { + "variants" : [ { + "hostname" : "translate-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-west-1.amazonaws.com" + }, "us-west-2" : { "variants" : [ { "hostname" : "translate-fips.us-west-2.amazonaws.com", @@ -19542,17 +20436,23 @@ }, "vpc-lattice" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { 
}, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -20869,6 +21769,15 @@ "cn-northwest-1" : { } } }, + "acm-pca" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "airflow" : { "endpoints" : { "cn-north-1" : { }, @@ -21014,12 +21923,6 @@ "cn-northwest-1" : { } } }, - "backupstorage" : { - "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } - } - }, "batch" : { "endpoints" : { "cn-north-1" : { }, @@ -21058,8 +21961,18 @@ }, "cloudcontrolapi" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "cloudformation" : { @@ -21788,8 +22701,20 @@ }, "pi" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "pipes" : { @@ -21868,7 +22793,8 @@ }, "redshift-serverless" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "resource-groups" : { @@ -22874,8 +23800,6 @@ }, "endpoints" : { "us-gov-east-1" : { - "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", - "protocols" : [ "http", "https" ], "variants" : [ { "hostname" : "autoscaling-plans.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] @@ -22887,8 +23811,6 @@ "protocols" : [ "http", "https" ] }, "us-gov-west-1" : { - "hostname" : "autoscaling-plans.us-gov-west-1.amazonaws.com", - "protocols" : [ "http", "https" ], 
"variants" : [ { "hostname" : "autoscaling-plans.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] @@ -22913,12 +23835,6 @@ "us-gov-west-1" : { } } }, - "backupstorage" : { - "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } - } - }, "batch" : { "defaults" : { "variants" : [ { @@ -22957,6 +23873,18 @@ }, "bedrock" : { "endpoints" : { + "bedrock-fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "bedrock-fips.us-gov-west-1.amazonaws.com" + }, + "bedrock-runtime-fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "bedrock-runtime-fips.us-gov-west-1.amazonaws.com" + }, "bedrock-runtime-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -23030,12 +23958,24 @@ "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -23618,17 +24558,31 @@ }, "directconnect" : { "endpoints" : { - "us-gov-east-1" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, - "hostname" : "directconnect.us-gov-east-1.amazonaws.com" + "deprecated" : true, + "hostname" : "directconnect-fips.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1" : { + "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "directconnect.us-gov-west-1.amazonaws.com" + "deprecated" : true, + "hostname" : "directconnect-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" 
: { + "variants" : [ { + "hostname" : "directconnect-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] } } }, @@ -25056,8 +26010,70 @@ }, "kinesisanalytics" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "kinesisanalytics-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kinesisvideo" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "kms" : { @@ -25532,6 +26548,12 @@ "isRegionalized" : 
false, "partitionEndpoint" : "aws-us-gov-global" }, + "oam" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "oidc" : { "endpoints" : { "us-gov-east-1" : { @@ -25620,8 +26642,46 @@ }, "pi" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "pinpoint" : { @@ -26283,6 +27343,36 @@ } } }, + "securitylake" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "securitylake.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "securitylake.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "securitylake.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "securitylake.us-gov-west-1.amazonaws.com" + } + } + }, "serverlessrepo" : { 
"defaults" : { "protocols" : [ "https" ] @@ -27010,12 +28100,24 @@ "variants" : [ { "hostname" : "textract-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "textract-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -27343,7 +28445,8 @@ }, "apigateway" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "appconfig" : { @@ -27820,32 +28923,8 @@ }, "ram" : { "endpoints" : { - "fips-us-iso-east-1" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-iso-east-1.c2s.ic.gov" - }, - "fips-us-iso-west-1" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "rbin" : { @@ -27880,43 +28959,23 @@ }, "rds" : { "endpoints" : { - "rds-fips.us-iso-east-1" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov" - }, - "rds-fips.us-iso-west-1" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov" - }, "rds.us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, "deprecated" : true, - 
"variants" : [ { - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-iso-east-1.c2s.ic.gov" }, "rds.us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, "deprecated" : true, - "variants" : [ { - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-iso-west-1.c2s.ic.gov" }, "us-iso-east-1" : { "variants" : [ { - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov", + "hostname" : "rds.us-iso-east-1.c2s.ic.gov", "tags" : [ "fips" ] } ] }, @@ -27925,11 +28984,11 @@ "region" : "us-iso-east-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "rds.us-iso-east-1.c2s.ic.gov" }, "us-iso-west-1" : { "variants" : [ { - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov", + "hostname" : "rds.us-iso-west-1.c2s.ic.gov", "tags" : [ "fips" ] } ] }, @@ -27938,37 +28997,23 @@ "region" : "us-iso-west-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov" + "hostname" : "rds.us-iso-west-1.c2s.ic.gov" } } }, "redshift" : { "endpoints" : { - "fips-us-iso-east-1" : { + "us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "redshift.us-iso-east-1.c2s.ic.gov" }, - "fips-us-iso-west-1" : { + "us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-iso-west-1.c2s.ic.gov" } } }, @@ -28277,6 +29322,11 @@ "us-isob-east-1" : { } } }, + "apigateway" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "appconfig" : { "endpoints" : { 
"us-isob-east-1" : { } @@ -28586,19 +29636,7 @@ }, "ram" : { "endpoints" : { - "fips-us-isob-east-1" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] - } + "us-isob-east-1" : { } } }, "rbin" : { @@ -28620,26 +29658,16 @@ }, "rds" : { "endpoints" : { - "rds-fips.us-isob-east-1" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov" - }, "rds.us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, "deprecated" : true, - "variants" : [ { - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov" }, "us-isob-east-1" : { "variants" : [ { - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov", + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov", "tags" : [ "fips" ] } ] }, @@ -28648,24 +29676,17 @@ "region" : "us-isob-east-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov" + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov" } } }, "redshift" : { "endpoints" : { - "fips-us-isob-east-1" : { + "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-isob-east-1.sc2s.sgov.gov" } } }, @@ -28889,7 +29910,11 @@ "partition" : "aws-iso-e", "partitionName" : "AWS ISOE (Europe)", "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { }, + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + }, "services" : { } }, { "defaults" : { diff --git 
a/botocore/data/entityresolution/2018-05-10/service-2.json b/botocore/data/entityresolution/2018-05-10/service-2.json index 0df021781a..636189ff5b 100644 --- a/botocore/data/entityresolution/2018-05-10/service-2.json +++ b/botocore/data/entityresolution/2018-05-10/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"entityresolution", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"AWSEntityResolution", "serviceFullName":"AWS EntityResolution", "serviceId":"EntityResolution", "signatureVersion":"v4", "signingName":"entityresolution", - "uid":"entityresolution-2018-05-10" + "uid":"entityresolution-2018-05-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddPolicyStatement":{ @@ -695,7 +697,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You do not have sufficient access to perform this action. HTTP Status Code: 403

", + "documentation":"

You do not have sufficient access to perform this action.

", "error":{ "httpStatusCode":403, "senderFault":true @@ -728,7 +730,7 @@ }, "effect":{ "shape":"StatementEffect", - "documentation":"

Determines whether the permissions specified in the policy are to be allowed (Allow) or denied (Deny).

" + "documentation":"

Determines whether the permissions specified in the policy are to be allowed (Allow) or denied (Deny).

If you set the value of the effect parameter to Deny for the AddPolicyStatement operation, you must also set the value of the effect parameter in the policy to Deny for the PutPolicy operation.

" }, "principal":{ "shape":"StatementPrincipalList", @@ -851,7 +853,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The request could not be processed because of conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. HTTP Status Code: 400

", + "documentation":"

The request could not be processed because of conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc.

", "error":{ "httpStatusCode":400, "senderFault":true @@ -863,7 +865,6 @@ "required":[ "idMappingTechniques", "inputSourceConfig", - "roleArn", "workflowName" ], "members":{ @@ -873,7 +874,7 @@ }, "idMappingTechniques":{ "shape":"IdMappingTechniques", - "documentation":"

An object which defines the idMappingType and the providerProperties.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "inputSourceConfig":{ "shape":"IdMappingWorkflowInputSourceConfig", @@ -884,7 +885,7 @@ "documentation":"

A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output.

" }, "roleArn":{ - "shape":"RoleArn", + "shape":"IdMappingRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" }, "tags":{ @@ -902,7 +903,6 @@ "required":[ "idMappingTechniques", "inputSourceConfig", - "roleArn", "workflowArn", "workflowName" ], @@ -913,7 +913,7 @@ }, "idMappingTechniques":{ "shape":"IdMappingTechniques", - "documentation":"

An object which defines the idMappingType and the providerProperties.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "inputSourceConfig":{ "shape":"IdMappingWorkflowInputSourceConfig", @@ -924,7 +924,7 @@ "documentation":"

A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output.

" }, "roleArn":{ - "shape":"RoleArn", + "shape":"IdMappingRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" }, "workflowArn":{ @@ -1405,7 +1405,7 @@ "documentation":"

The current quota value for the customers.

" } }, - "documentation":"

The request was rejected because it attempted to create resources beyond the current Entity Resolution account limits. The error message describes the limit exceeded. HTTP Status Code: 402

", + "documentation":"

The request was rejected because it attempted to create resources beyond the current Entity Resolution account limits. The error message describes the limit exceeded.

", "error":{ "httpStatusCode":402, "senderFault":true @@ -1486,7 +1486,6 @@ "createdAt", "idMappingTechniques", "inputSourceConfig", - "roleArn", "updatedAt", "workflowArn", "workflowName" @@ -1502,7 +1501,7 @@ }, "idMappingTechniques":{ "shape":"IdMappingTechniques", - "documentation":"

An object which defines the idMappingType and the providerProperties.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "inputSourceConfig":{ "shape":"IdMappingWorkflowInputSourceConfig", @@ -1513,7 +1512,7 @@ "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn.

" }, "roleArn":{ - "shape":"RoleArn", + "shape":"IdMappingRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf.

" }, "tags":{ @@ -1947,18 +1946,30 @@ "members":{ "inputRecords":{ "shape":"Integer", - "documentation":"

The total number of input records.

" + "documentation":"

The total number of records that were input for processing.

" }, "recordsNotProcessed":{ "shape":"Integer", "documentation":"

The total number of records that did not get processed.

" }, + "totalMappedRecords":{ + "shape":"Integer", + "documentation":"

The total number of records that were mapped.

" + }, + "totalMappedSourceRecords":{ + "shape":"Integer", + "documentation":"

The total number of mapped source records.

" + }, + "totalMappedTargetRecords":{ + "shape":"Integer", + "documentation":"

The total number of distinct mapped target records.

" + }, "totalRecordsProcessed":{ "shape":"Integer", - "documentation":"

The total number of records processed.

" + "documentation":"

The total number of records that were processed.

" } }, - "documentation":"

An object containing InputRecords, TotalRecordsProcessed, MatchIDs, and RecordsNotProcessed.

" + "documentation":"

An object containing InputRecords, RecordsNotProcessed, TotalRecordsProcessed, TotalMappedRecords, TotalMappedSourceRecords, and TotalMappedTargetRecords.

" }, "IdMappingJobOutputSource":{ "type":"structure", @@ -1988,6 +1999,45 @@ "max":1, "min":1 }, + "IdMappingRoleArn":{ + "type":"string", + "max":512, + "min":0, + "pattern":"^$|^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + }, + "IdMappingRuleBasedProperties":{ + "type":"structure", + "required":[ + "attributeMatchingModel", + "recordMatchingModel", + "ruleDefinitionType" + ], + "members":{ + "attributeMatchingModel":{ + "shape":"AttributeMatchingModel", + "documentation":"

The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel.

If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A matches the value of the BusinessEmail field of Profile B, the two profiles are matched on the Email attribute type.

If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B.

" + }, + "recordMatchingModel":{ + "shape":"RecordMatchingModel", + "documentation":"

The type of matching record that is allowed to be used in an ID mapping workflow.

If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the source can be matched to the same record in the target.

If the value is set to MANY_SOURCE_TO_ONE_TARGET, multiple records in the source can be matched to one record in the target.

" + }, + "ruleDefinitionType":{ + "shape":"IdMappingWorkflowRuleDefinitionType", + "documentation":"

The set of rules you can use in an ID mapping workflow. The limitations specified for the source or target to define the match rules must be compatible.

" + }, + "rules":{ + "shape":"IdMappingRuleBasedPropertiesRulesList", + "documentation":"

The rules that can be used for ID mapping.

" + } + }, + "documentation":"

An object that defines the list of matching rules to run in an ID mapping workflow.

" + }, + "IdMappingRuleBasedPropertiesRulesList":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":25, + "min":1 + }, "IdMappingTechniques":{ "type":"structure", "required":["idMappingType"], @@ -1999,13 +2049,20 @@ "providerProperties":{ "shape":"ProviderProperties", "documentation":"

An object which defines any additional configurations required by the provider service.

" + }, + "ruleBasedProperties":{ + "shape":"IdMappingRuleBasedProperties", + "documentation":"

An object which defines any additional configurations required by rule-based matching.

" } }, - "documentation":"

An object which defines the ID mapping techniques and provider configurations.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "IdMappingType":{ "type":"string", - "enum":["PROVIDER"] + "enum":[ + "PROVIDER", + "RULE_BASED" + ] }, "IdMappingWorkflowArn":{ "type":"string", @@ -2017,7 +2074,7 @@ "members":{ "inputSourceARN":{ "shape":"IdMappingWorkflowInputSourceInputSourceARNString", - "documentation":"

An Glue table ARN for the input source table.

" + "documentation":"

A Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.

" }, "schemaName":{ "shape":"EntityName", @@ -2025,7 +2082,7 @@ }, "type":{ "shape":"IdNamespaceType", - "documentation":"

The type of ID namespace. There are two types: SOURCE and TARGET.

The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow.

The TARGET contains a configuration of targetId to which all sourceIds will resolve to.

" + "documentation":"

The type of ID namespace. There are two types: SOURCE and TARGET.

The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow.

The TARGET contains a configuration of targetId which all sourceIds will resolve to.

" } }, "documentation":"

An object containing InputSourceARN, SchemaName, and Type.

" @@ -2065,6 +2122,17 @@ "max":1, "min":1 }, + "IdMappingWorkflowRuleDefinitionType":{ + "type":"string", + "enum":[ + "SOURCE", + "TARGET" + ] + }, + "IdMappingWorkflowRuleDefinitionTypeList":{ + "type":"list", + "member":{"shape":"IdMappingWorkflowRuleDefinitionType"} + }, "IdMappingWorkflowSummary":{ "type":"structure", "required":[ @@ -2097,6 +2165,23 @@ "type":"string", "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$" }, + "IdNamespaceIdMappingWorkflowMetadata":{ + "type":"structure", + "required":["idMappingType"], + "members":{ + "idMappingType":{ + "shape":"IdMappingType", + "documentation":"

The type of ID mapping.

" + } + }, + "documentation":"

The settings for the ID namespace for the ID mapping workflow job.

" + }, + "IdNamespaceIdMappingWorkflowMetadataList":{ + "type":"list", + "member":{"shape":"IdNamespaceIdMappingWorkflowMetadata"}, + "max":1, + "min":1 + }, "IdNamespaceIdMappingWorkflowProperties":{ "type":"structure", "required":["idMappingType"], @@ -2108,9 +2193,13 @@ "providerProperties":{ "shape":"NamespaceProviderProperties", "documentation":"

An object which defines any additional configurations required by the provider service.

" + }, + "ruleBasedProperties":{ + "shape":"NamespaceRuleBasedProperties", + "documentation":"

An object which defines any additional configurations required by rule-based matching.

" } }, - "documentation":"

An object containing IdMappingType and ProviderProperties.

" + "documentation":"

An object containing IdMappingType, ProviderProperties, and RuleBasedProperties.

" }, "IdNamespaceIdMappingWorkflowPropertiesList":{ "type":"list", @@ -2124,7 +2213,7 @@ "members":{ "inputSourceARN":{ "shape":"IdNamespaceInputSourceInputSourceARNString", - "documentation":"

An Glue table ARN for the input source table.

" + "documentation":"

A Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.

" }, "schemaName":{ "shape":"EntityName", @@ -2165,6 +2254,10 @@ "shape":"Description", "documentation":"

The description of the ID namespace.

" }, + "idMappingWorkflowProperties":{ + "shape":"IdNamespaceIdMappingWorkflowMetadataList", + "documentation":"

An object which defines any additional configurations required by the ID mapping workflow.

" + }, "idNamespaceArn":{ "shape":"IdNamespaceArn", "documentation":"

The Amazon Resource Name (ARN) of the ID namespace.

" @@ -2175,7 +2268,7 @@ }, "type":{ "shape":"IdNamespaceType", - "documentation":"

The type of ID namespace. There are two types: SOURCE and TARGET.

The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow.

The TARGET contains a configuration of targetId to which all sourceIds will resolve to.

" + "documentation":"

The type of ID namespace. There are two types: SOURCE and TARGET.

The SOURCE contains configurations for sourceId data that will be processed in an ID mapping workflow.

The TARGET contains a configuration of targetId which all sourceIds will resolve to.

" }, "updatedAt":{ "shape":"Timestamp", @@ -2218,7 +2311,7 @@ }, "inputSourceARN":{ "shape":"InputSourceInputSourceARNString", - "documentation":"

An Glue table ARN for the input source table.

" + "documentation":"

A Glue table Amazon Resource Name (ARN) for the input source table.

" }, "schemaName":{ "shape":"EntityName", @@ -2257,7 +2350,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

This exception occurs when there is an internal failure in the Entity Resolution service. HTTP Status Code: 500

", + "documentation":"

This exception occurs when there is an internal failure in the Entity Resolution service.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true, @@ -2651,6 +2744,13 @@ } } }, + "MatchPurpose":{ + "type":"string", + "enum":[ + "IDENTIFIER_GENERATION", + "INDEXING" + ] + }, "MatchingWorkflowArn":{ "type":"string", "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$" @@ -2707,6 +2807,34 @@ }, "documentation":"

An object containing ProviderConfiguration and ProviderServiceArn.

" }, + "NamespaceRuleBasedProperties":{ + "type":"structure", + "members":{ + "attributeMatchingModel":{ + "shape":"AttributeMatchingModel", + "documentation":"

The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel.

If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A matches the value of BusinessEmail field of Profile B, the two profiles are matched on the Email attribute type.

If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B.

" + }, + "recordMatchingModels":{ + "shape":"RecordMatchingModelList", + "documentation":"

The type of matching record that is allowed to be used in an ID mapping workflow.

If the value is set to ONE_SOURCE_TO_ONE_TARGET, only one record in the source is matched to one record in the target.

If the value is set to MANY_SOURCE_TO_ONE_TARGET, all matching records in the source are matched to one record in the target.

" + }, + "ruleDefinitionTypes":{ + "shape":"IdMappingWorkflowRuleDefinitionTypeList", + "documentation":"

The sets of rules you can use in an ID mapping workflow. The limitations specified for the source and target must be compatible.

" + }, + "rules":{ + "shape":"NamespaceRuleBasedPropertiesRulesList", + "documentation":"

The rules for the ID namespace.

" + } + }, + "documentation":"

The rule-based properties of an ID namespace. These properties define how the ID namespace can be used in an ID mapping workflow.

" + }, + "NamespaceRuleBasedPropertiesRulesList":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":25, + "min":1 + }, "NextToken":{ "type":"string", "max":1024, @@ -2974,7 +3102,7 @@ }, "policy":{ "shape":"PolicyDocument", - "documentation":"

The resource-based policy.

" + "documentation":"

The resource-based policy.

If you set the value of the effect parameter in the policy to Deny for the PutPolicy operation, you must also set the value of the effect parameter to Deny for the AddPolicyStatement operation.

" }, "token":{ "shape":"PolicyToken", @@ -3021,6 +3149,17 @@ "min":0, "pattern":"^[a-zA-Z_0-9-./@ ()+\\t]*$" }, + "RecordMatchingModel":{ + "type":"string", + "enum":[ + "ONE_SOURCE_TO_ONE_TARGET", + "MANY_SOURCE_TO_ONE_TARGET" + ] + }, + "RecordMatchingModelList":{ + "type":"list", + "member":{"shape":"RecordMatchingModel"} + }, "RequiredBucketActionsList":{ "type":"list", "member":{"shape":"String"} @@ -3057,7 +3196,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The resource could not be found. HTTP Status Code: 404

", + "documentation":"

The resource could not be found.

", "error":{ "httpStatusCode":404, "senderFault":true @@ -3097,14 +3236,18 @@ "members":{ "attributeMatchingModel":{ "shape":"AttributeMatchingModel", - "documentation":"

The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE ,the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type.

" + "documentation":"

The comparison type. You can either choose ONE_TO_ONE or MANY_TO_MANY as the attributeMatchingModel.

If you choose MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email attribute type.

If you choose ONE_TO_ONE, the system can only match attributes if the sub-types are an exact match. For example, for the Email attribute type, the system will only consider it a match if the value of the Email field of Profile A matches the value of the Email field of Profile B.

" + }, + "matchPurpose":{ + "shape":"MatchPurpose", + "documentation":"

An indicator of whether to generate IDs and index the data or not.

If you choose IDENTIFIER_GENERATION, the process generates IDs and indexes the data.

If you choose INDEXING, the process indexes the data without generating IDs.

" }, "rules":{ "shape":"RuleBasedPropertiesRulesList", "documentation":"

A list of Rule objects, each of which have fields RuleName and MatchingKeys.

" } }, - "documentation":"

An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.

" + "documentation":"

An object which defines the list of matching rules to run in a matching workflow. RuleBasedProperties contain a Rules field, which is a list of rule objects.

" }, "RuleBasedPropertiesRulesList":{ "type":"list", @@ -3170,9 +3313,13 @@ "shape":"AttributeName", "documentation":"

A string that instructs Entity Resolution to combine several columns into a unified column with the identical attribute type.

For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common groupName will prompt Entity Resolution to concatenate them into a single value.

" }, + "hashed":{ + "shape":"Boolean", + "documentation":"

Indicates if the column values are hashed in the schema input. If the value is set to TRUE, the column values are hashed. If the value is set to FALSE, the column values are cleartext.

" + }, "matchKey":{ "shape":"AttributeName", - "documentation":"

A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning a matchKey called address to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no matchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.

" + "documentation":"

A key that allows grouping of multiple input attributes into a unified matching group.

For example, consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning a matchKey called address to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group.

If no matchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.

" }, "subType":{ "shape":"AttributeName", @@ -3183,7 +3330,7 @@ "documentation":"

The type of the attribute, selected from a list of values.

" } }, - "documentation":"

An object containing FieldName, Type, GroupName, MatchKey, and SubType.

" + "documentation":"

An object containing FieldName, Type, GroupName, MatchKey, Hashing, and SubType.

" }, "SchemaInputAttributes":{ "type":"list", @@ -3392,7 +3539,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The request was denied due to request throttling. HTTP Status Code: 429

", + "documentation":"

The request was denied due to request throttling.

", "error":{ "httpStatusCode":429, "senderFault":true @@ -3405,7 +3552,7 @@ "type":"string", "max":760, "min":1, - "pattern":"^[a-zA-Z_0-9-,]*$" + "pattern":"^[a-zA-Z_0-9-+=/,]*$" }, "UniqueIdList":{ "type":"list", @@ -3442,7 +3589,6 @@ "required":[ "idMappingTechniques", "inputSourceConfig", - "roleArn", "workflowName" ], "members":{ @@ -3452,7 +3598,7 @@ }, "idMappingTechniques":{ "shape":"IdMappingTechniques", - "documentation":"

An object which defines the idMappingType and the providerProperties.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "inputSourceConfig":{ "shape":"IdMappingWorkflowInputSourceConfig", @@ -3463,7 +3609,7 @@ "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn.

" }, "roleArn":{ - "shape":"RoleArn", + "shape":"IdMappingRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf.

" }, "workflowName":{ @@ -3479,7 +3625,6 @@ "required":[ "idMappingTechniques", "inputSourceConfig", - "roleArn", "workflowArn", "workflowName" ], @@ -3490,7 +3635,7 @@ }, "idMappingTechniques":{ "shape":"IdMappingTechniques", - "documentation":"

An object which defines the idMappingType and the providerProperties.

" + "documentation":"

An object which defines the ID mapping technique and any additional configurations.

" }, "inputSourceConfig":{ "shape":"IdMappingWorkflowInputSourceConfig", @@ -3501,7 +3646,7 @@ "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn.

" }, "roleArn":{ - "shape":"RoleArn", + "shape":"IdMappingRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes this role to access Amazon Web Services resources on your behalf.

" }, "workflowArn":{ @@ -3726,7 +3871,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The input fails to satisfy the constraints specified by Entity Resolution. HTTP Status Code: 400

", + "documentation":"

The input fails to satisfy the constraints specified by Entity Resolution.

", "error":{ "httpStatusCode":400, "senderFault":true diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index fcc9f8d045..56bd189436 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2015-01-01", "endpointPrefix":"es", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Elasticsearch Service", "serviceId":"Elasticsearch Service", "signatureVersion":"v4", - "uid":"es-2015-01-01" + "uid":"es-2015-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptInboundCrossClusterSearchConnection":{ diff --git a/botocore/data/events/2015-10-07/service-2.json b/botocore/data/events/2015-10-07/service-2.json index cb49d30e39..2e828947dd 100644 --- a/botocore/data/events/2015-10-07/service-2.json +++ b/botocore/data/events/2015-10-07/service-2.json @@ -10,7 +10,8 @@ "serviceId":"EventBridge", "signatureVersion":"v4", "targetPrefix":"AWSEvents", - "uid":"eventbridge-2015-10-07" + "uid":"eventbridge-2015-10-07", + "auth":["aws.auth#sigv4"] }, "operations":{ "ActivateEventSource":{ diff --git a/botocore/data/firehose/2015-08-04/service-2.json b/botocore/data/firehose/2015-08-04/service-2.json index 646a626801..9c5be929eb 100644 --- a/botocore/data/firehose/2015-08-04/service-2.json +++ b/botocore/data/firehose/2015-08-04/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"firehose", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Firehose", "serviceFullName":"Amazon Kinesis Firehose", "serviceId":"Firehose", "signatureVersion":"v4", "targetPrefix":"Firehose_20150804", - "uid":"firehose-2015-08-04" + "uid":"firehose-2015-08-04", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateDeliveryStream":{ @@ -130,7 +132,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" + "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" }, "StopDeliveryStreamEncryption":{ "name":"StopDeliveryStreamEncryption", @@ -637,6 +639,16 @@ }, "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" }, + "CatalogConfiguration":{ + "type":"structure", + "members":{ + "CatalogARN":{ + "shape":"GlueDataCatalogARN", + "documentation":"

Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "documentation":"

Describes the containers where the destination Apache Iceberg Tables are persisted.

Amazon Data Firehose is in preview release and is subject to change.

" + }, "CloudWatchLoggingOptions":{ "type":"structure", "members":{ @@ -787,6 +799,10 @@ "SnowflakeDestinationConfiguration":{ "shape":"SnowflakeDestinationConfiguration", "documentation":"

Configure Snowflake destination

" + }, + "IcebergDestinationConfiguration":{ + "shape":"IcebergDestinationConfiguration", + "documentation":"

Configure Apache Iceberg Tables destination.

Amazon Data Firehose is in preview release and is subject to change.

" } } }, @@ -1131,6 +1147,10 @@ "AmazonOpenSearchServerlessDestinationDescription":{ "shape":"AmazonOpenSearchServerlessDestinationDescription", "documentation":"

The destination in the Serverless offering for Amazon OpenSearch Service.

" + }, + "IcebergDestinationDescription":{ + "shape":"IcebergDestinationDescription", + "documentation":"

Describes a destination in Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.

" } }, "documentation":"

Describes the destination for a delivery stream.

" @@ -1145,6 +1165,36 @@ "min":1, "pattern":"[a-zA-Z0-9-]+" }, + "DestinationTableConfiguration":{ + "type":"structure", + "required":[ + "DestinationTableName", + "DestinationDatabaseName" + ], + "members":{ + "DestinationTableName":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

Specifies the name of the Apache Iceberg Table.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "DestinationDatabaseName":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

The name of the Apache Iceberg database.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "UniqueKeys":{ + "shape":"ListOfNonEmptyStringsWithoutWhitespace", + "documentation":"

A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "S3ErrorOutputPrefix":{ + "shape":"ErrorOutputPrefix", + "documentation":"

The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination.

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "documentation":"

Describes the configuration of a destination in Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "DestinationTableConfigurationList":{ + "type":"list", + "member":{"shape":"DestinationTableConfiguration"} + }, "DocumentIdOptions":{ "type":"structure", "required":["DefaultDocumentIdFormat"], @@ -1688,6 +1738,12 @@ "min":0, "pattern":"^$|\\.[0-9a-z!\\-_.*'()]+" }, + "GlueDataCatalogARN":{ + "type":"string", + "max":512, + "min":1, + "pattern":"arn:.*" + }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -1842,7 +1898,7 @@ "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ "shape":"HttpEndpointRequestConfiguration", - "documentation":"

The configuration of the requeste sent to the HTTP endpoint specified as the destination.

" + "documentation":"

The configuration of the request sent to the HTTP endpoint that is specified as the destination.

" }, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ @@ -1857,7 +1913,11 @@ "shape":"HttpEndpointS3BackupMode", "documentation":"

Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, - "S3Configuration":{"shape":"S3DestinationConfiguration"} + "S3Configuration":{"shape":"S3DestinationConfiguration"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for HTTP Endpoint destination.

" + } }, "documentation":"

Describes the configuration of the HTTP endpoint destination.

" }, @@ -1890,7 +1950,11 @@ "shape":"HttpEndpointS3BackupMode", "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, - "S3DestinationDescription":{"shape":"S3DestinationDescription"} + "S3DestinationDescription":{"shape":"S3DestinationDescription"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for HTTP Endpoint destination.

" + } }, "documentation":"

Describes the HTTP endpoint destination.

" }, @@ -1923,7 +1987,11 @@ "shape":"HttpEndpointS3BackupMode", "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, - "S3Update":{"shape":"S3DestinationUpdate"} + "S3Update":{"shape":"S3DestinationUpdate"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for HTTP Endpoint destination.

" + } }, "documentation":"

Updates the specified HTTP endpoint destination.

" }, @@ -1976,6 +2044,99 @@ "pattern":"https://.*", "sensitive":true }, + "IcebergDestinationConfiguration":{ + "type":"structure", + "required":[ + "RoleARN", + "CatalogConfiguration", + "S3Configuration" + ], + "members":{ + "DestinationTableConfigurationList":{ + "shape":"DestinationTableConfigurationList", + "documentation":"

Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "BufferingHints":{"shape":"BufferingHints"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "S3BackupMode":{ + "shape":"IcebergS3BackupMode", + "documentation":"

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "RetryOptions":{"shape":"RetryOptions"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Apache Iceberg tables role.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "CatalogConfiguration":{ + "shape":"CatalogConfiguration", + "documentation":"

Configuration describing where the destination Apache Iceberg Tables are persisted.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "S3Configuration":{"shape":"S3DestinationConfiguration"} + }, + "documentation":"

Specifies the destination configuration settings for the Apache Iceberg Table.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "IcebergDestinationDescription":{ + "type":"structure", + "members":{ + "DestinationTableConfigurationList":{ + "shape":"DestinationTableConfigurationList", + "documentation":"

Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "BufferingHints":{"shape":"BufferingHints"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "S3BackupMode":{ + "shape":"IcebergS3BackupMode", + "documentation":"

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "RetryOptions":{"shape":"RetryOptions"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "CatalogConfiguration":{ + "shape":"CatalogConfiguration", + "documentation":"

Configuration describing where the destination Iceberg tables are persisted.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "S3DestinationDescription":{"shape":"S3DestinationDescription"} + }, + "documentation":"

Describes a destination in Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "IcebergDestinationUpdate":{ + "type":"structure", + "members":{ + "DestinationTableConfigurationList":{ + "shape":"DestinationTableConfigurationList", + "documentation":"

Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "BufferingHints":{"shape":"BufferingHints"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "S3BackupMode":{ + "shape":"IcebergS3BackupMode", + "documentation":"

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "RetryOptions":{"shape":"RetryOptions"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "CatalogConfiguration":{ + "shape":"CatalogConfiguration", + "documentation":"

Configuration describing where the destination Iceberg tables are persisted.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "S3Configuration":{"shape":"S3DestinationConfiguration"} + }, + "documentation":"

Describes an update for a destination in Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.

" + }, + "IcebergS3BackupMode":{ + "type":"string", + "enum":[ + "FailedDataOnly", + "AllData" + ] + }, "InputFormatConfiguration":{ "type":"structure", "members":{ @@ -2221,6 +2382,10 @@ "AuthenticationConfiguration":{ "shape":"AuthenticationConfiguration", "documentation":"

The authentication configuration of the Amazon MSK cluster.

" + }, + "ReadFromTimestamp":{ + "shape":"ReadFromTimestamp", + "documentation":"

The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to the timestamp when Firehose becomes Active.

If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestamp parameter to Epoch (1970-01-01T00:00:00Z).

" } }, "documentation":"

The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.

" @@ -2243,6 +2408,10 @@ "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", "documentation":"

Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" + }, + "ReadFromTimestamp":{ + "shape":"ReadFromTimestamp", + "documentation":"

The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to the timestamp when Firehose becomes Active.

If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestamp parameter to Epoch (1970-01-01T00:00:00Z).

" } }, "documentation":"

Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.

" @@ -2404,7 +2573,7 @@ "documentation":"

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

" } }, - "documentation":"

A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.

" + "documentation":"

A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.

" }, "ParquetWriterVersion":{ "type":"string", @@ -2622,6 +2791,7 @@ "type":"string", "min":1 }, + "ReadFromTimestamp":{"type":"timestamp"}, "Record":{ "type":"structure", "required":["Data"], @@ -2639,8 +2809,6 @@ "RoleARN", "ClusterJDBCURL", "CopyCommand", - "Username", - "Password", "S3Configuration" ], "members":{ @@ -2687,6 +2855,10 @@ "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", "documentation":"

The CloudWatch logging options for your delivery stream.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Amazon Redshift.

" } }, "documentation":"

Describes the configuration of a destination in Amazon Redshift.

" @@ -2697,7 +2869,6 @@ "RoleARN", "ClusterJDBCURL", "CopyCommand", - "Username", "S3DestinationDescription" ], "members":{ @@ -2740,6 +2911,10 @@ "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Amazon Redshift.

" } }, "documentation":"

Describes a destination in Amazon Redshift.

" @@ -2790,6 +2965,10 @@ "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Amazon Redshift.

" } }, "documentation":"

Describes an update for a destination in Amazon Redshift.

" @@ -3021,6 +3200,31 @@ }, "documentation":"

Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" }, + "SecretARN":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:.*" + }, + "SecretsManagerConfiguration":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "SecretARN":{ + "shape":"SecretARN", + "documentation":"

The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True.

" + }, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination specific role defined in the destination configuration. If you do not provide the role, then we use the destination specific role. This parameter is required for Splunk.

" + }, + "Enabled":{ + "shape":"BooleanObject", + "documentation":"

Specifies whether you want to use the secrets manager feature. When set as True, the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to False, Firehose falls back to the credentials in the destination configuration.

" + } + }, + "documentation":"

The structure that defines how Firehose accesses the secret.

" + }, "SecurityGroupIdList":{ "type":"list", "member":{"shape":"NonEmptyStringWithoutWhitespace"}, @@ -3065,6 +3269,30 @@ "pattern":".+?\\.snowflakecomputing\\.com", "sensitive":true }, + "SnowflakeBufferingHints":{ + "type":"structure", + "members":{ + "SizeInMBs":{ + "shape":"SnowflakeBufferingSizeInMBs", + "documentation":"

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1.

" + }, + "IntervalInSeconds":{ + "shape":"SnowflakeBufferingIntervalInSeconds", + "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.

" + } + }, + "documentation":"

Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.

" + }, + "SnowflakeBufferingIntervalInSeconds":{ + "type":"integer", + "max":900, + "min":0 + }, + "SnowflakeBufferingSizeInMBs":{ + "type":"integer", + "max":128, + "min":1 + }, "SnowflakeContentColumnName":{ "type":"string", "max":255, @@ -3089,8 +3317,6 @@ "type":"structure", "required":[ "AccountUrl", - "PrivateKey", - "User", "Database", "Schema", "Table", @@ -3160,7 +3386,15 @@ "shape":"SnowflakeS3BackupMode", "documentation":"

Choose an S3 backup mode

" }, - "S3Configuration":{"shape":"S3DestinationConfiguration"} + "S3Configuration":{"shape":"S3DestinationConfiguration"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Snowflake.

" + }, + "BufferingHints":{ + "shape":"SnowflakeBufferingHints", + "documentation":"

Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.

" + } }, "documentation":"

Configure Snowflake destination

" }, @@ -3221,7 +3455,15 @@ "shape":"SnowflakeS3BackupMode", "documentation":"

Choose an S3 backup mode

" }, - "S3DestinationDescription":{"shape":"S3DestinationDescription"} + "S3DestinationDescription":{"shape":"S3DestinationDescription"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Snowflake.

" + }, + "BufferingHints":{ + "shape":"SnowflakeBufferingHints", + "documentation":"

Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.

" + } }, "documentation":"

Optional Snowflake destination description

" }, @@ -3286,7 +3528,15 @@ "shape":"SnowflakeS3BackupMode", "documentation":"

Choose an S3 backup mode

" }, - "S3Update":{"shape":"S3DestinationUpdate"} + "S3Update":{"shape":"S3DestinationUpdate"}, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

Describes the Secrets Manager configuration in Snowflake.

" + }, + "BufferingHints":{ + "shape":"SnowflakeBufferingHints", + "documentation":"

Describes the buffering to perform before delivering data to the Snowflake destination.

" + } }, "documentation":"

Update to configuration settings

" }, @@ -3430,7 +3680,6 @@ "required":[ "HECEndpoint", "HECEndpointType", - "HECToken", "S3Configuration" ], "members":{ @@ -3473,6 +3722,10 @@ "BufferingHints":{ "shape":"SplunkBufferingHints", "documentation":"

The buffering options. If no value is specified, the default values for Splunk are used.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Splunk.

" } }, "documentation":"

Describes the configuration of a destination in Splunk.

" @@ -3519,6 +3772,10 @@ "BufferingHints":{ "shape":"SplunkBufferingHints", "documentation":"

The buffering options. If no value is specified, the default values for Splunk are used.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Splunk.

" } }, "documentation":"

Describes a destination in Splunk.

" @@ -3565,6 +3822,10 @@ "BufferingHints":{ "shape":"SplunkBufferingHints", "documentation":"

The buffering options. If no value is specified, the default values for Splunk are used.

" + }, + "SecretsManagerConfiguration":{ + "shape":"SecretsManagerConfiguration", + "documentation":"

The configuration that defines how you access secrets for Splunk.

" } }, "documentation":"

Describes an update for a destination in Splunk.

" @@ -3776,7 +4037,11 @@ }, "SnowflakeDestinationUpdate":{ "shape":"SnowflakeDestinationUpdate", - "documentation":"

Update to the Snowflake destination condiguration settings

" + "documentation":"

Update to the Snowflake destination configuration settings.

" + }, + "IcebergDestinationUpdate":{ + "shape":"IcebergDestinationUpdate", + "documentation":"

Describes an update for a destination in Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.

" } } }, @@ -3844,5 +4109,5 @@ "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, - "documentation":"Amazon Data Firehose

Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose.

Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.

" + "documentation":"Amazon Data Firehose

Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose.

Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

" } diff --git a/botocore/data/fis/2020-12-01/service-2.json b/botocore/data/fis/2020-12-01/service-2.json index d3f5ff45ce..a0992745a9 100644 --- a/botocore/data/fis/2020-12-01/service-2.json +++ b/botocore/data/fis/2020-12-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"fis", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"FIS", "serviceFullName":"AWS Fault Injection Simulator", "serviceId":"fis", "signatureVersion":"v4", "signingName":"fis", - "uid":"fis-2020-12-01" + "uid":"fis-2020-12-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateExperimentTemplate":{ @@ -1010,6 +1012,31 @@ "documentation":"

Describes the configuration for experiment logging to Amazon CloudWatch Logs.

" }, "ExperimentEndTime":{"type":"timestamp"}, + "ExperimentError":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"ExperimentErrorAccountId", + "documentation":"

The Amazon Web Services Account ID where the experiment failure occurred.

" + }, + "code":{ + "shape":"ExperimentErrorCode", + "documentation":"

The error code for the failed experiment.

" + }, + "location":{ + "shape":"ExperimentErrorLocation", + "documentation":"

Context for the section of the experiment template that failed.

" + } + }, + "documentation":"

Describes the error when an experiment has failed.

" + }, + "ExperimentErrorAccountId":{"type":"string"}, + "ExperimentErrorCode":{ + "type":"string", + "max":128, + "pattern":"[\\S]+" + }, + "ExperimentErrorLocation":{"type":"string"}, "ExperimentId":{ "type":"string", "max":64, @@ -1076,6 +1103,10 @@ "reason":{ "shape":"ExperimentStatusReason", "documentation":"

The reason for the state.

" + }, + "error":{ + "shape":"ExperimentError", + "documentation":"

The error information of the experiment when the action has failed.

" } }, "documentation":"

Describes the state of an experiment.

" @@ -2702,5 +2733,5 @@ "exception":true } }, - "documentation":"

Fault Injection Service is a managed service that enables you to perform fault injection experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.

" + "documentation":"

Amazon Web Services Fault Injection Service is a managed service that enables you to perform fault injection experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide.

" } diff --git a/botocore/data/fms/2018-01-01/service-2.json b/botocore/data/fms/2018-01-01/service-2.json index 043bc8da02..a2dc2137e6 100644 --- a/botocore/data/fms/2018-01-01/service-2.json +++ b/botocore/data/fms/2018-01-01/service-2.json @@ -11,7 +11,8 @@ "serviceId":"FMS", "signatureVersion":"v4", "targetPrefix":"AWSFMS_20180101", - "uid":"fms-2018-01-01" + "uid":"fms-2018-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAdminAccount":{ @@ -2548,7 +2549,7 @@ }, "ManagedServiceData":{ "type":"string", - "max":10000, + "max":30000, "min":1, "pattern":"^((?!\\\\[nr]).)+" }, diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index a26b252ce2..16d7dda7a4 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"fsx", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon FSx", "serviceId":"FSx", "signatureVersion":"v4", "signingName":"fsx", "targetPrefix":"AWSSimbaAPIService_v20180301", - "uid":"fsx-2018-03-01" + "uid":"fsx-2018-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFileSystemAliases":{ @@ -187,7 +189,7 @@ {"shape":"InternalServerError"}, {"shape":"MissingFileSystemConfiguration"} ], - "documentation":"

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation:

  • Amazon FSx for Lustre

  • Amazon FSx for NetApp ONTAP

  • Amazon FSx for OpenZFS

  • Amazon FSx for Windows File Server

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

  • Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system in JSON format.

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" + "documentation":"

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation:

  • Amazon FSx for Lustre

  • Amazon FSx for NetApp ONTAP

  • Amazon FSx for OpenZFS

  • Amazon FSx for Windows File Server

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

  • Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system in JSON format.

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" }, "CreateFileSystemFromBackup":{ "name":"CreateFileSystemFromBackup", @@ -357,7 +359,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleFileSystem operation.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

The data in a deleted file system is also deleted and can't be recovered by any means.

", + "documentation":"

Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

The data in a deleted file system is also deleted and can't be recovered by any means.

", "idempotent":true }, "DeleteSnapshot":{ @@ -738,7 +740,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

  • AuditLogConfiguration

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • StorageType

  • ThroughputCapacity

  • DiskIopsConfiguration

  • WeeklyMaintenanceStartTime

For FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DataCompressionType

  • LogConfiguration

  • LustreRootSquashConfiguration

  • PerUnitStorageThroughput

  • StorageCapacity

  • WeeklyMaintenanceStartTime

For FSx for ONTAP file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • FsxAdminPassword

  • HAPairs

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • ThroughputCapacityPerHAPair

  • WeeklyMaintenanceStartTime

For FSx for OpenZFS file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • CopyTagsToBackups

  • CopyTagsToVolumes

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

  • AuditLogConfiguration

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • StorageType

  • ThroughputCapacity

  • DiskIopsConfiguration

  • WeeklyMaintenanceStartTime

For FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DataCompressionType

  • LogConfiguration

  • LustreRootSquashConfiguration

  • MetadataConfiguration

  • PerUnitStorageThroughput

  • StorageCapacity

  • WeeklyMaintenanceStartTime

For FSx for ONTAP file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • FsxAdminPassword

  • HAPairs

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • ThroughputCapacityPerHAPair

  • WeeklyMaintenanceStartTime

For FSx for OpenZFS file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • CopyTagsToBackups

  • CopyTagsToVolumes

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

" }, "UpdateSharedVpcConfiguration":{ "name":"UpdateSharedVpcConfiguration", @@ -875,7 +877,7 @@ "AdministrativeActionType":{"shape":"AdministrativeActionType"}, "ProgressPercent":{ "shape":"ProgressPercent", - "documentation":"

The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type.

" + "documentation":"

The percentage-complete status of a STORAGE_OPTIMIZATION or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any other administrative action type.

" }, "RequestTime":{ "shape":"RequestTime", @@ -883,7 +885,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

  • UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.

" + "documentation":"

The status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

    For a backup restore to a second-generation FSx for ONTAP file system, indicates that all data has been downloaded to the volume, and clients now have read-write access to volume.

  • UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.

  • PENDING - For a backup restore to a second-generation FSx for ONTAP file system, indicates that the file metadata is being downloaded onto the volume. The volume's Lifecycle state is CREATING.

  • IN_PROGRESS - For a backup restore to a second-generation FSx for ONTAP file system, indicates that all metadata has been downloaded to the new volume and client can access data with read-only access while Amazon FSx downloads the file data to the volume. Track the progress of this process with the ProgressPercent element.

" }, "TargetFileSystemValues":{ "shape":"FileSystem", @@ -915,7 +917,7 @@ }, "AdministrativeActionType":{ "type":"string", - "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • THROUGHPUT_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When THROUGHPUT_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION task starts.

    • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

    • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • IOPS_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a IOPS_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When IOPS_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_TYPE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_TYPE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

  • VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when using the using the FULL_COPY strategy.

  • VOLUME_UPDATE_WITH_SNAPSHOT - A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume), or CLI (copy-snapshot-and-update-volume).

", + "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • THROUGHPUT_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When THROUGHPUT_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has completed successfully, a STORAGE_OPTIMIZATION task starts.

    • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

    • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • IOPS_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When IOPS_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_TYPE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_TYPE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

  • DOWNLOAD_DATA_FROM_BACKUP - An FSx for ONTAP backup is being restored to a new volume on a second-generation file system. Once all of the file metadata is loaded onto the volume, you can mount the volume with read-only access during this process.

  • VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when using the FULL_COPY strategy.

  • VOLUME_UPDATE_WITH_SNAPSHOT - A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume), or CLI (copy-snapshot-and-update-volume).

", "enum":[ "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", @@ -930,7 +932,8 @@ "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", - "VOLUME_INITIALIZE_WITH_SNAPSHOT" + "VOLUME_INITIALIZE_WITH_SNAPSHOT", + "DOWNLOAD_DATA_FROM_BACKUP" ] }, "AdministrativeActions":{ @@ -949,7 +952,7 @@ "members":{ "Aggregates":{ "shape":"Aggregates", - "documentation":"

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The strings in the value of Aggregates are not are not formatted as aggrX, where X is a number between 1 and 6.

  • The value of Aggregates contains aggregates that are not present.

  • One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.

" + "documentation":"

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The strings in the value of Aggregates are not formatted as aggrX, where X is a number between 1 and 12.

  • The value of Aggregates contains aggregates that are not present.

  • One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.

" }, "TotalConstituents":{ "shape":"TotalConstituents", @@ -1718,7 +1721,7 @@ }, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

(Default = SCRATCH_1)

" + "documentation":"

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

(Default = SCRATCH_1)

" }, "AutoImportPolicy":{ "shape":"AutoImportPolicyType", @@ -1752,10 +1755,29 @@ "RootSquashConfiguration":{ "shape":"LustreRootSquashConfiguration", "documentation":"

The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.

" + }, + "MetadataConfiguration":{ + "shape":"CreateFileSystemLustreMetadataConfiguration", + "documentation":"

The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a PERSISTENT_2 deployment type.

" } }, "documentation":"

The Lustre configuration for the file system being created.

The following parameters are not supported for file systems with a data repository association created with .

  • AutoImportPolicy

  • ExportPath

  • ImportedFileChunkSize

  • ImportPath

" }, + "CreateFileSystemLustreMetadataConfiguration":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Iops":{ + "shape":"MetadataIops", + "documentation":"

(USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity.

" + }, + "Mode":{ + "shape":"MetadataConfigurationMode", + "documentation":"

The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

  • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity.

  • In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system.

" + } + }, + "documentation":"

The Lustre metadata performance configuration for the creation of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration uses a Metadata IOPS value to set the maximum rate of metadata disk IOPS supported by the file system.

After creation, the file system supports increasing metadata performance. For more information on Metadata IOPS, see Lustre metadata performance configuration in the Amazon FSx for Lustre User Guide.

" + }, "CreateFileSystemOntapConfiguration":{ "type":"structure", "required":["DeploymentType"], @@ -1764,7 +1786,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OntapDeploymentType", - "documentation":"

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

  • MULTI_AZ_1 - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.

  • SINGLE_AZ_1 - A file system configured for Single-AZ redundancy.

  • SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

" + "documentation":"

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

  • MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.

  • MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.

  • SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.

  • SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

" }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", @@ -1780,7 +1802,7 @@ }, "PreferredSubnetId":{ "shape":"SubnetId", - "documentation":"

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

" + "documentation":"

Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies the subnet in which you want the preferred file server to be located.

" }, "RouteTableIds":{ "shape":"RouteTableIds", @@ -1793,11 +1815,11 @@ "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "HAPairs":{ "shape":"HAPairs", - "documentation":"

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of HAPairs is less than 1 or greater than 12.

  • The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

" + "documentation":"

Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of HAPairs is less than 1 or greater than 12.

  • The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.

" }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for scale-up file systems powered by one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2 file systems, valid values are 3072 or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" + "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for file systems powered by one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

  • For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" } }, "documentation":"

The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.

" @@ -1821,7 +1843,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

  • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

  • SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

  • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

" + "documentation":"

Specifies the file system deployment type. Valid values are the following:

  • MULTI_AZ_1- Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same Amazon Web Services Region.

  • SINGLE_AZ_HA_2- Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.

  • SINGLE_AZ_HA_1- Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.

  • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.

  • SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s that automatically recover within a single Availability Zone.

For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -1867,11 +1889,11 @@ }, "StorageCapacity":{ "shape":"StorageCapacity", - "documentation":"

Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).

FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows:

  • For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

  • For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.

  • For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.

FSx for ONTAP file systems - The amount of storage capacity that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs.

FSx for OpenZFS file systems - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB).

FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows:

  • For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).

  • For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).

" + "documentation":"

Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).

FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows:

  • For SCRATCH_2, PERSISTENT_2, and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

  • For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.

  • For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.

FSx for ONTAP file systems - The amount of storage capacity that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs.

FSx for OpenZFS file systems - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB).

FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows:

  • For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).

  • For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).

" }, "StorageType":{ "shape":"StorageType", - "documentation":"

Sets the storage type for the file system that you're creating. Valid values are SSD and HDD.

  • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

  • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide.

" + "documentation":"

Sets the storage type for the file system that you're creating. Valid values are SSD and HDD.

  • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

  • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide.

" }, "SubnetIds":{ "shape":"SubnetIds", @@ -1894,7 +1916,7 @@ "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"}, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

(Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15:

  • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

  • 2.12 and 2.15 are supported by all Lustre deployment types. 2.12 or 2.15 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2.

Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12.

If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails.

" + "documentation":"

For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15:

  • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

  • 2.12 is supported by all Lustre deployment types, except for PERSISTENT_2 with a metadata configuration mode.

  • 2.15 is supported by all Lustre deployment types and is recommended for all new file systems.

Default value is 2.10, except for the following deployments:

  • Default value is 2.12 when DeploymentType is set to PERSISTENT_2 without a metadata configuration mode.

  • Default value is 2.15 when DeploymentType is set to PERSISTENT_2 with a metadata configuration mode.

" }, "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", @@ -1975,7 +1997,7 @@ }, "SecurityStyle":{ "shape":"SecurityStyle", - "documentation":"

Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see Volume security style in the Amazon FSx for NetApp ONTAP User Guide. Specify one of the following values:

  • UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.

  • NTFS if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.

  • MIXED This is an advanced setting. For more information, see the topic What the security styles and their effects are in the NetApp Documentation Center.

For more information, see Volume security style in the FSx for ONTAP User Guide.

" + "documentation":"

Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. Specify one of the following values:

  • UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.

  • NTFS if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.

  • MIXED This is an advanced setting. For more information, see the topic What the security styles and their effects are in the NetApp Documentation Center.

For more information, see Volume security style in the FSx for ONTAP User Guide.

" }, "SizeInMegabytes":{ "shape":"VolumeCapacity", @@ -1994,7 +2016,7 @@ "TieringPolicy":{"shape":"TieringPolicy"}, "OntapVolumeType":{ "shape":"InputOntapVolumeType", - "documentation":"

Specifies the type of volume you are creating. Valid values are the following:

  • RW specifies a read/write volume. RW is the default.

  • DP specifies a data-protection volume. A DP volume is read-only and can be used as the destination of a NetApp SnapMirror relationship.

For more information, see Volume types in the Amazon FSx for NetApp ONTAP User Guide.

" + "documentation":"

Specifies the type of volume you are creating. Valid values are the following:

  • RW specifies a read/write volume. RW is the default.

  • DP specifies a data-protection volume. A DP volume is read-only and can be used as the destination of a NetApp SnapMirror relationship.

For more information, see Volume types in the Amazon FSx for NetApp ONTAP User Guide.

" }, "SnapshotPolicy":{ "shape":"SnapshotPolicy", @@ -2010,7 +2032,7 @@ }, "VolumeStyle":{ "shape":"VolumeStyle", - "documentation":"

Use to specify the style of an ONTAP volume. FSx for ONTAP offers two styles of volumes that you can use for different purposes, FlexVol and FlexGroup volumes. For more information, see Volume styles in the Amazon FSx for NetApp ONTAP User Guide.

" + "documentation":"

Use to specify the style of an ONTAP volume. FSx for ONTAP offers two styles of volumes that you can use for different purposes, FlexVol and FlexGroup volumes. For more information, see Volume styles in the Amazon FSx for NetApp ONTAP User Guide.

" }, "AggregateConfiguration":{ "shape":"CreateAggregateConfiguration", @@ -2174,7 +2196,7 @@ "Tags":{"shape":"Tags"}, "RootVolumeSecurityStyle":{ "shape":"StorageVirtualMachineRootVolumeSecurityStyle", - "documentation":"

The security style of the root volume of the SVM. Specify one of the following values:

  • UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.

  • NTFS if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.

  • MIXED This is an advanced setting. For more information, see Volume security style in the Amazon FSx for NetApp ONTAP User Guide.

" + "documentation":"

The security style of the root volume of the SVM. Specify one of the following values:

  • UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.

  • NTFS if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.

  • MIXED This is an advanced setting. For more information, see Volume security style in the Amazon FSx for NetApp ONTAP User Guide.

" } } }, @@ -3440,7 +3462,7 @@ }, "FailureDetails":{ "shape":"FileCacheFailureDetails", - "documentation":"

A structure providing details of any failures that occurred.

" + "documentation":"

A structure providing details of any failures that occurred in creating a cache.

" }, "StorageCapacity":{ "shape":"StorageCapacity", @@ -3487,7 +3509,7 @@ }, "DataRepositoryPath":{ "shape":"ArchivePath", - "documentation":"

The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:

  • The path can be an NFS data repository that links to the cache. The path can be in one of two formats:

    • If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association.

    • If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter.

  • The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/.

" + "documentation":"

The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:

  • The path can be an NFS data repository that links to the cache. The path can be in one of two formats:

    • If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association.

    • If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter.

  • The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/.

" }, "DataRepositorySubdirectories":{ "shape":"SubDirectoriesPaths", @@ -3756,6 +3778,21 @@ "MISCONFIGURED_UNAVAILABLE" ] }, + "FileSystemLustreMetadataConfiguration":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Iops":{ + "shape":"MetadataIops", + "documentation":"

The number of Metadata IOPS provisioned for the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

" + }, + "Mode":{ + "shape":"MetadataConfigurationMode", + "documentation":"

The metadata configuration mode for provisioning Metadata IOPS for the file system.

  • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.

  • In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.

" + } + }, + "documentation":"

The Lustre metadata performance configuration of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration enables the file system to support increasing metadata performance.

" + }, "FileSystemMaintenanceOperation":{ "type":"string", "documentation":"

An enumeration specifying the currently ongoing maintenance operation.

", @@ -4133,6 +4170,10 @@ "RootSquashConfiguration":{ "shape":"LustreRootSquashConfiguration", "documentation":"

The Lustre root squash configuration for an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.

" + }, + "MetadataConfiguration":{ + "shape":"FileSystemLustreMetadataConfiguration", + "documentation":"

The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type.

" } }, "documentation":"

The configuration for the Amazon FSx for Lustre file system.

" @@ -4221,6 +4262,18 @@ "max":100000, "min":8 }, + "MetadataConfigurationMode":{ + "type":"string", + "enum":[ + "AUTOMATIC", + "USER_PROVISIONED" + ] + }, + "MetadataIops":{ + "type":"integer", + "max":192000, + "min":1500 + }, "MetadataStorageCapacity":{ "type":"integer", "max":2147483647, @@ -4323,7 +4376,8 @@ "enum":[ "MULTI_AZ_1", "SINGLE_AZ_1", - "SINGLE_AZ_2" + "SINGLE_AZ_2", + "MULTI_AZ_2" ] }, "OntapEndpointIpAddresses":{ @@ -4339,7 +4393,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OntapDeploymentType", - "documentation":"

Specifies the FSx for ONTAP file system deployment type in use in the file system.

  • MULTI_AZ_1 - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.

  • SINGLE_AZ_1 - A file system configured for Single-AZ redundancy.

  • SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" + "documentation":"

Specifies the FSx for ONTAP file system deployment type in use in the file system.

  • MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.

  • MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.

  • SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.

  • SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", @@ -4366,11 +4420,11 @@ }, "HAPairs":{ "shape":"HAPairs", - "documentation":"

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of HAPairs is less than 1 or greater than 12.

  • The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

" + "documentation":"

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of HAPairs is less than 1 or greater than 12.

  • The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.

" }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2, valid values are 3072 or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" + "documentation":"

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

  • For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" } }, "documentation":"

Configuration for the FSx for NetApp ONTAP file system.

" @@ -4533,6 +4587,8 @@ "enum":[ "SINGLE_AZ_1", "SINGLE_AZ_2", + "SINGLE_AZ_HA_1", + "SINGLE_AZ_HA_2", "MULTI_AZ_1" ] }, @@ -4551,7 +4607,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.

" + "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -5054,30 +5110,30 @@ "members":{ "UserName":{ "shape":"DirectoryUserName", - "documentation":"

Specifies the updated user name for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

" + "documentation":"

Specifies the updated user name for the service account on your self-managed Active Directory domain. Amazon FSx uses this account to join to your self-managed Active Directory domain.

This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

" }, "Password":{ "shape":"DirectoryPassword", - "documentation":"

Specifies the updated password for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

" + "documentation":"

Specifies the updated password for the service account on your self-managed Active Directory domain. Amazon FSx uses this account to join to your self-managed Active Directory domain.

" }, "DnsIps":{ "shape":"DnsIps", - "documentation":"

A list of up to three DNS server or domain controller IP addresses in your self-managed AD domain.

" + "documentation":"

A list of up to three DNS server or domain controller IP addresses in your self-managed Active Directory domain.

" }, "DomainName":{ "shape":"ActiveDirectoryFullyQualifiedName", - "documentation":"

Specifies an updated fully qualified domain name of your self-managed AD configuration.

" + "documentation":"

Specifies an updated fully qualified domain name of your self-managed Active Directory configuration.

" }, "OrganizationalUnitDistinguishedName":{ "shape":"OrganizationalUnitDistinguishedName", - "documentation":"

Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD.

" + "documentation":"

Specifies an updated fully qualified distinguished name of the organization unit within your self-managed Active Directory.

" }, "FileSystemAdministratorsGroup":{ "shape":"FileSystemAdministratorsGroupName", - "documentation":"

Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges for the Amazon FSx resource.

" + "documentation":"

For FSx for ONTAP file systems only - Specifies the updated name of the self-managed Active Directory domain group whose members are granted administrative privileges for the Amazon FSx resource.

" } }, - "documentation":"

Specifies changes you are making to the self-managed Microsoft Active Directory (AD) configuration to which an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

" + "documentation":"

Specifies changes you are making to the self-managed Microsoft Active Directory configuration to which an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

" }, "ServiceLimit":{ "type":"string", @@ -5319,7 +5375,8 @@ "IN_PROGRESS", "PENDING", "COMPLETED", - "UPDATED_OPTIMIZING" + "UPDATED_OPTIMIZING", + "OPTIMIZING" ] }, "StorageCapacity":{ @@ -5793,10 +5850,28 @@ "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", "documentation":"

The throughput of an Amazon FSx for Lustre Persistent SSD-based file system, measured in megabytes per second per tebibyte (MB/s/TiB). You can increase or decrease your file system's throughput. Valid values depend on the deployment type of the file system, as follows:

  • For PERSISTENT_1 SSD-based deployment types, valid values are 50, 100, and 200 MB/s/TiB.

  • For PERSISTENT_2 SSD-based deployment types, valid values are 125, 250, 500, and 1000 MB/s/TiB.

For more information, see Managing throughput capacity.

" + }, + "MetadataConfiguration":{ + "shape":"UpdateFileSystemLustreMetadataConfiguration", + "documentation":"

The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance.

" } }, "documentation":"

The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

" }, + "UpdateFileSystemLustreMetadataConfiguration":{ + "type":"structure", + "members":{ + "Iops":{ + "shape":"MetadataIops", + "documentation":"

(USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.

" + }, + "Mode":{ + "shape":"MetadataConfigurationMode", + "documentation":"

The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

  • To increase the Metadata IOPS or to switch from AUTOMATIC mode, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.

  • To switch from USER_PROVISIONED mode, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.

    If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.

" + } + }, + "documentation":"

The Lustre metadata performance configuration update for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. You can request an increase in your file system's Metadata IOPS and/or switch your file system's metadata configuration mode. For more information, see Managing metadata performance in the Amazon FSx for Lustre User Guide.

" + }, "UpdateFileSystemOntapConfiguration":{ "type":"structure", "members":{ @@ -5825,7 +5900,11 @@ }, "ThroughputCapacityPerHAPair":{ "shape":"ThroughputCapacityPerHAPair", - "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2, valid values are 3072 or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" + "documentation":"

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

  • For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

  • For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

  • For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

  • The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

  • The value of ThroughputCapacityPerHAPair is not a valid value.

" + }, + "HAPairs":{ + "shape":"HAPairs", + "documentation":"

Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

" } }, "documentation":"

The configuration updates for an Amazon FSx for NetApp ONTAP file system.

" diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 2640ade186..d91b54b07a 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"gamelift", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon GameLift", "serviceId":"GameLift", "signatureVersion":"v4", "targetPrefix":"GameLift", - "uid":"gamelift-2015-10-01" + "uid":"gamelift-2015-10-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptMatch":{ diff --git a/botocore/data/globalaccelerator/2018-08-08/service-2.json b/botocore/data/globalaccelerator/2018-08-08/service-2.json index 9870e6db54..518a7cbd9f 100644 --- a/botocore/data/globalaccelerator/2018-08-08/service-2.json +++ b/botocore/data/globalaccelerator/2018-08-08/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"globalaccelerator", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Global Accelerator", "serviceId":"Global Accelerator", "signatureVersion":"v4", "signingName":"globalaccelerator", "targetPrefix":"GlobalAccelerator_V20180706", - "uid":"globalaccelerator-2018-08-08" + "uid":"globalaccelerator-2018-08-08", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddCustomRoutingEndpoints":{ @@ -92,7 +94,9 @@ "errors":[ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, @@ -125,7 +129,8 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints.

Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, @@ -214,7 +219,8 @@ {"shape":"AcceleratorNotDisabledException"}, {"shape":"AssociatedListenerFoundException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"InvalidArgumentException"} + {"shape":"InvalidArgumentException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Delete an accelerator. Before you can delete an accelerator, you must disable it and remove all dependent resources (listeners and endpoint groups). To disable the accelerator, update the accelerator to set Enabled to false.

When you create an accelerator, by default, Global Accelerator provides you with a set of two static IP addresses. Alternatively, you can bring your own IP address ranges to Global Accelerator and assign IP addresses from those ranges.

The IP addresses are assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to the accelerator, so you can no longer route traffic by using them. As a best practice, ensure that you have permissions in place to avoid inadvertently deleting accelerators. You can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Identity and access management in the Global Accelerator Developer Guide.

" }, @@ -246,7 +252,8 @@ {"shape":"AcceleratorNotDisabledException"}, {"shape":"AssociatedListenerFoundException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"InvalidArgumentException"} + {"shape":"InvalidArgumentException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Delete a custom routing accelerator. Before you can delete an accelerator, you must disable it and remove all dependent resources (listeners and endpoint groups). To disable the accelerator, update the accelerator to set Enabled to false.

When you create a custom routing accelerator, by default, Global Accelerator provides you with a set of two static IP addresses.

The IP addresses are assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to the accelerator, so you can no longer route traffic by using them. As a best practice, ensure that you have permissions in place to avoid inadvertently deleting accelerators. You can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Identity and access management in the Global Accelerator Developer Guide.

" }, @@ -675,6 +682,9 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"AcceleratorNotFoundException"}, + {"shape":"AttachmentNotFoundException"}, + {"shape":"EndpointGroupNotFoundException"}, + {"shape":"ListenerNotFoundException"}, {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"} ], @@ -772,7 +782,9 @@ {"shape":"AcceleratorNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"InvalidArgumentException"} + {"shape":"InvalidArgumentException"}, + {"shape":"TransactionInProgressException"}, + {"shape":"ConflictException"} ], "documentation":"

Update an accelerator to make changes, such as the following:

  • Change the name of the accelerator.

  • Disable the accelerator so that it no longer accepts or routes traffic, or so that you can delete it.

  • Enable the accelerator, if it is disabled.

  • Change the IP address type to dual-stack if it is IPv4, or change the IP address type to IPv4 if it's dual-stack.

Be aware that static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete the accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, @@ -788,7 +800,8 @@ {"shape":"AcceleratorNotFoundException"}, {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Update the attributes for an accelerator.

" }, @@ -821,7 +834,9 @@ "errors":[ {"shape":"AcceleratorNotFoundException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"InvalidArgumentException"} + {"shape":"InvalidArgumentException"}, + {"shape":"TransactionInProgressException"}, + {"shape":"ConflictException"} ], "documentation":"

Update a custom routing accelerator.

" }, @@ -837,7 +852,8 @@ {"shape":"AcceleratorNotFoundException"}, {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"TransactionInProgressException"} ], "documentation":"

Update the attributes for a custom routing accelerator.

" }, @@ -3264,6 +3280,10 @@ "shape":"IpAddressType", "documentation":"

The IP address type that an accelerator supports. For a standard accelerator, the value can be IPV4 or DUAL_STACK.

" }, + "IpAddresses":{ + "shape":"IpAddresses", + "documentation":"

The IP addresses for an accelerator.

" + }, "Enabled":{ "shape":"GenericBoolean", "documentation":"

Indicates whether an accelerator is enabled. The value is true or false. The default value is true.

If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted.

" @@ -3365,6 +3385,10 @@ "shape":"IpAddressType", "documentation":"

The IP address type that an accelerator supports. For a custom routing accelerator, the value must be IPV4.

" }, + "IpAddresses":{ + "shape":"IpAddresses", + "documentation":"

The IP addresses for an accelerator.

" + }, "Enabled":{ "shape":"GenericBoolean", "documentation":"

Indicates whether an accelerator is enabled. The value is true or false. The default value is true.

If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted.

" diff --git a/botocore/data/glue/2017-03-31/paginators-1.json b/botocore/data/glue/2017-03-31/paginators-1.json index 25970b7b45..62622ad921 100644 --- a/botocore/data/glue/2017-03-31/paginators-1.json +++ b/botocore/data/glue/2017-03-31/paginators-1.json @@ -112,6 +112,42 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "Schemas" + }, + "ListUsageProfiles": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Profiles" + }, + "GetWorkflowRuns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Runs" + }, + "ListBlueprints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Blueprints" + }, + "ListJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "JobNames" + }, + "ListTriggers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TriggerNames" + }, + "ListWorkflows": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Workflows" } } } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index c84b898194..e84d55f7f1 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"glue", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Glue", "serviceId":"Glue", "signatureVersion":"v4", "targetPrefix":"AWSGlue", - "uid":"glue-2017-03-31" + "uid":"glue-2017-03-31", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchCreatePartition":{ @@ -248,6 +250,22 @@ ], "documentation":"

Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.

" }, + "BatchPutDataQualityStatisticAnnotation":{ + "name":"BatchPutDataQualityStatisticAnnotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchPutDataQualityStatisticAnnotationRequest"}, + "output":{"shape":"BatchPutDataQualityStatisticAnnotationResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Annotate datapoints over time for a specific data quality statistic.

" + }, "BatchStopJobRun":{ "name":"BatchStopJobRun", "http":{ @@ -726,6 +744,24 @@ ], "documentation":"

Creates a new trigger.

" }, + "CreateUsageProfile":{ + "name":"CreateUsageProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUsageProfileRequest"}, + "output":{"shape":"CreateUsageProfileResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"OperationNotSupportedException"} + ], + "documentation":"

Creates a Glue usage profile.

" + }, "CreateUserDefinedFunction":{ "name":"CreateUserDefinedFunction", "http":{ @@ -1152,6 +1188,22 @@ ], "documentation":"

Deletes a specified trigger. If the trigger is not found, no exception is thrown.

" }, + "DeleteUsageProfile":{ + "name":"DeleteUsageProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUsageProfileRequest"}, + "output":{"shape":"DeleteUsageProfileResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"OperationNotSupportedException"} + ], + "documentation":"

Deletes the specified Glue usage profile.

" + }, "DeleteUserDefinedFunction":{ "name":"DeleteUserDefinedFunction", "http":{ @@ -1438,6 +1490,38 @@ ], "documentation":"

Retrieves the security configuration for a specified catalog.

" }, + "GetDataQualityModel":{ + "name":"GetDataQualityModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataQualityModelRequest"}, + "output":{"shape":"GetDataQualityModelResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieve the training status of the model along with more information (CompletedOn, StartedOn, FailureReason).

" + }, + "GetDataQualityModelResult":{ + "name":"GetDataQualityModelResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataQualityModelResultRequest"}, + "output":{"shape":"GetDataQualityModelResultResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieve a statistic's predictions for a given Profile ID.

" + }, "GetDataQualityResult":{ "name":"GetDataQualityResult", "http":{ @@ -1630,7 +1714,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Retrieves the metadata for a given job run.

" + "documentation":"

Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run.

" }, "GetJobRuns":{ "name":"GetJobRuns", @@ -2192,6 +2276,23 @@ ], "documentation":"

Allows a third-party analytical engine to retrieve unfiltered table metadata from the Data Catalog.

For IAM authorization, the public IAM action associated with this API is glue:GetTable.

" }, + "GetUsageProfile":{ + "name":"GetUsageProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUsageProfileRequest"}, + "output":{"shape":"GetUsageProfileResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"OperationNotSupportedException"} + ], + "documentation":"

Retrieves information about the specified Glue usage profile.

" + }, "GetUserDefinedFunction":{ "name":"GetUserDefinedFunction", "http":{ @@ -2256,7 +2357,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Retrieves the metadata for a given workflow run.

" + "documentation":"

Retrieves the metadata for a given workflow run. Job run history is accessible for 90 days for your workflow and job run.

" }, "GetWorkflowRunProperties":{ "name":"GetWorkflowRunProperties", @@ -2436,6 +2537,35 @@ ], "documentation":"

Returns a paginated list of rulesets for the specified list of Glue tables.

" }, + "ListDataQualityStatisticAnnotations":{ + "name":"ListDataQualityStatisticAnnotations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataQualityStatisticAnnotationsRequest"}, + "output":{"shape":"ListDataQualityStatisticAnnotationsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieve annotations for a data quality statistic.

" + }, + "ListDataQualityStatistics":{ + "name":"ListDataQualityStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataQualityStatisticsRequest"}, + "output":{"shape":"ListDataQualityStatisticsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves a list of data quality statistics.

" + }, "ListDevEndpoints":{ "name":"ListDevEndpoints", "http":{ @@ -2597,6 +2727,22 @@ ], "documentation":"

Retrieves the names of all trigger resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

" }, + "ListUsageProfiles":{ + "name":"ListUsageProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsageProfilesRequest"}, + "output":{"shape":"ListUsageProfilesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationNotSupportedException"} + ], + "documentation":"

List all the Glue usage profiles.

" + }, "ListWorkflows":{ "name":"ListWorkflows", "http":{ @@ -2627,6 +2773,21 @@ ], "documentation":"

Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.

" }, + "PutDataQualityProfileAnnotation":{ + "name":"PutDataQualityProfileAnnotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDataQualityProfileAnnotationRequest"}, + "output":{"shape":"PutDataQualityProfileAnnotationResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Annotate all datapoints for a Profile.

" + }, "PutResourcePolicy":{ "name":"PutResourcePolicy", "http":{ @@ -3503,6 +3664,24 @@ ], "documentation":"

Updates a trigger definition.

" }, + "UpdateUsageProfile":{ + "name":"UpdateUsageProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUsageProfileRequest"}, + "output":{"shape":"UpdateUsageProfileResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Update a Glue usage profile.

" + }, "UpdateUserDefinedFunction":{ "name":"UpdateUserDefinedFunction", "http":{ @@ -3539,6 +3718,11 @@ } }, "shapes":{ + "AWSManagedClientApplicationReference":{ + "type":"string", + "max":2048, + "pattern":"\\S+" + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -3683,6 +3867,10 @@ "max":30, "min":1 }, + "AllowedValuesStringList":{ + "type":"list", + "member":{"shape":"ConfigValueString"} + }, "AlreadyExistsException":{ "type":"structure", "members":{ @@ -3854,6 +4042,32 @@ }, "documentation":"

Specifies an Amazon Redshift target.

" }, + "AnnotationError":{ + "type":"structure", + "members":{ + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID for the failed annotation.

" + }, + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID for the failed annotation.

" + }, + "FailureReason":{ + "shape":"DescriptionString", + "documentation":"

The reason why the annotation failed.

" + } + }, + "documentation":"

A failed annotation.

" + }, + "AnnotationErrorList":{ + "type":"list", + "member":{"shape":"AnnotationError"} + }, + "AnnotationList":{ + "type":"list", + "member":{"shape":"StatisticAnnotation"} + }, "ApplyMapping":{ "type":"structure", "required":[ @@ -3957,6 +4171,70 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "AuthenticationConfiguration":{ + "type":"structure", + "members":{ + "AuthenticationType":{ + "shape":"AuthenticationType", + "documentation":"

A structure containing the authentication configuration.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The secret manager ARN to store credentials.

" + }, + "OAuth2Properties":{ + "shape":"OAuth2Properties", + "documentation":"

The properties for OAuth2 authentication.

" + } + }, + "documentation":"

A structure containing the authentication configuration.

" + }, + "AuthenticationConfigurationInput":{ + "type":"structure", + "members":{ + "AuthenticationType":{ + "shape":"AuthenticationType", + "documentation":"

A structure containing the authentication configuration in the CreateConnection request.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The secret manager ARN to store credentials in the CreateConnection request.

" + }, + "OAuth2Properties":{ + "shape":"OAuth2PropertiesInput", + "documentation":"

The properties for OAuth2 authentication in the CreateConnection request.

" + } + }, + "documentation":"

A structure containing the authentication configuration in the CreateConnection request.

" + }, + "AuthenticationType":{ + "type":"string", + "enum":[ + "BASIC", + "OAUTH2", + "CUSTOM" + ] + }, + "AuthorizationCode":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"\\S+" + }, + "AuthorizationCodeProperties":{ + "type":"structure", + "members":{ + "AuthorizationCode":{ + "shape":"AuthorizationCode", + "documentation":"

An authorization code to be used in the third leg of the AUTHORIZATION_CODE grant workflow. This is a single-use code which becomes invalid once exchanged for an access token, thus it is acceptable to have this value as a request parameter.

" + }, + "RedirectUri":{ + "shape":"RedirectUri", + "documentation":"

The redirect URI where the user gets redirected to by authorization server when issuing an authorization code. The URI is subsequently used when the authorization code is exchanged for an access token.

" + } + }, + "documentation":"

The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type workflow.

" + }, "BackfillError":{ "type":"structure", "members":{ @@ -4006,6 +4284,10 @@ "shape":"OneInput", "documentation":"

The nodes that are inputs to the data target.

" }, + "PartitionKeys":{ + "shape":"GlueStudioPathList", + "documentation":"

The partition keys used to distribute data across multiple partitions or shards based on a specific key or set of keys.

" + }, "Database":{ "shape":"EnclosedInStringProperty", "documentation":"

The database that contains the table you want to use as the target. This database must already exist in the Data Catalog.

" @@ -4528,6 +4810,29 @@ } } }, + "BatchPutDataQualityStatisticAnnotationRequest":{ + "type":"structure", + "required":["InclusionAnnotations"], + "members":{ + "InclusionAnnotations":{ + "shape":"InclusionAnnotationList", + "documentation":"

A list of DatapointInclusionAnnotation's.

" + }, + "ClientToken":{ + "shape":"HashString", + "documentation":"

Client Token.

" + } + } + }, + "BatchPutDataQualityStatisticAnnotationResponse":{ + "type":"structure", + "members":{ + "FailedInclusionAnnotations":{ + "shape":"AnnotationErrorList", + "documentation":"

A list of AnnotationError's.

" + } + } + }, "BatchSize":{ "type":"integer", "max":100, @@ -6216,10 +6521,69 @@ "documentation":"

A specified condition was not satisfied.

", "exception":true }, + "ConditionExpression":{ + "type":"structure", + "required":[ + "Condition", + "TargetColumn" + ], + "members":{ + "Condition":{ + "shape":"DatabrewCondition", + "documentation":"

The condition of the condition expression.

" + }, + "Value":{ + "shape":"DatabrewConditionValue", + "documentation":"

The value of the condition expression.

" + }, + "TargetColumn":{ + "shape":"TargetColumn", + "documentation":"

The target column of the condition expressions.

" + } + }, + "documentation":"

Condition expression defined in the Glue Studio data preparation recipe node.

" + }, + "ConditionExpressionList":{ + "type":"list", + "member":{"shape":"ConditionExpression"} + }, "ConditionList":{ "type":"list", "member":{"shape":"Condition"} }, + "ConfigValueString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ConfigurationMap":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"ConfigurationObject"} + }, + "ConfigurationObject":{ + "type":"structure", + "members":{ + "DefaultValue":{ + "shape":"ConfigValueString", + "documentation":"

A default value for the parameter.

" + }, + "AllowedValues":{ + "shape":"AllowedValuesStringList", + "documentation":"

A list of allowed values for the parameter.

" + }, + "MinValue":{ + "shape":"ConfigValueString", + "documentation":"

A minimum allowed value for the parameter.

" + }, + "MaxValue":{ + "shape":"ConfigValueString", + "documentation":"

A maximum allowed value for the parameter.

" + } + }, + "documentation":"

Specifies the values that an admin sets for each job or session parameter configured in a Glue usage profile.

" + }, "ConflictException":{ "type":"structure", "members":{ @@ -6274,23 +6638,39 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

  • SECRET_ID - The secret ID used for the secret manager of credentials.

  • CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

  • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

  • KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

  • KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

  • KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

  • KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

  • KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

  • KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

  • ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

  • ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

  • KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

  • KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

  • ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

  • KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

  • KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

  • KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

  • KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

" + "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

  • SECRET_ID - The secret ID used for the secret manager of credentials.

  • CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

  • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

  • KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

  • KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

  • KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

  • KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

  • KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

  • KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

  • ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

  • ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

  • KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

  • KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

  • ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

  • KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

  • KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

  • KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

  • KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

  • ROLE_ARN - The role to be used for running queries.

  • REGION - The Amazon Web Services Region where queries will be run.

  • WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.

  • CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run.

  • DATABASE - The Amazon Redshift database that you are connecting to.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", - "documentation":"

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully.

" + "documentation":"

The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

The time that this connection definition was created.

" + "documentation":"

The timestamp of the time that this connection definition was created.

" }, "LastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The last time that this connection definition was updated.

" + "documentation":"

The timestamp of the last time the connection definition was updated.

" }, "LastUpdatedBy":{ "shape":"NameString", "documentation":"

The user, group, or role that last updated this connection definition.

" + }, + "Status":{ + "shape":"ConnectionStatus", + "documentation":"

The status of the connection. Can be one of: READY, IN_PROGRESS, or FAILED.

" + }, + "StatusReason":{ + "shape":"LongValueString", + "documentation":"

The reason for the connection status.

" + }, + "LastConnectionValidationTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp of the time this connection was last validated.

" + }, + "AuthenticationConfiguration":{ + "shape":"AuthenticationConfiguration", + "documentation":"

The authentication properties of the connection.

" } }, "documentation":"

Defines a connection to a data source.

" @@ -6305,7 +6685,7 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

The name of the connection. Connection will not function as expected without a name.

" + "documentation":"

The name of the connection.

" }, "Description":{ "shape":"DescriptionString", @@ -6313,7 +6693,7 @@ }, "ConnectionType":{ "shape":"ConnectionType", - "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

    JDBC Connections use the following ConnectionParameters.

    • Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL.

    • Required: All of (USERNAME, PASSWORD) or SECRET_ID.

    • Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.

  • KAFKA - Designates a connection to an Apache Kafka streaming platform.

    KAFKA Connections use the following ConnectionParameters.

    • Required: KAFKA_BOOTSTRAP_SERVERS.

    • Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA.

    • Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA.

    • Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM.

    • Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA.

    • Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.

  • MONGODB - Designates a connection to a MongoDB document database.

    MONGODB Connections use the following ConnectionParameters.

    • Required: CONNECTION_URL.

    • Required: All of (USERNAME, PASSWORD) or SECRET_ID.

  • NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

    NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.

  • MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.

    MARKETPLACE Connections use the following ConnectionParameters.

    • Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL.

    • Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.

  • CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

SFTP is not supported.

For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.

For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.

" + "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

    JDBC Connections use the following ConnectionParameters.

    • Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL.

    • Required: All of (USERNAME, PASSWORD) or SECRET_ID.

    • Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.

  • KAFKA - Designates a connection to an Apache Kafka streaming platform.

    KAFKA Connections use the following ConnectionParameters.

    • Required: KAFKA_BOOTSTRAP_SERVERS.

    • Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA.

    • Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA.

    • Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM.

    • Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA.

    • Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.

  • MONGODB - Designates a connection to a MongoDB document database.

    MONGODB Connections use the following ConnectionParameters.

    • Required: CONNECTION_URL.

    • Required: All of (USERNAME, PASSWORD) or SECRET_ID.

  • SALESFORCE - Designates a connection to Salesforce using OAuth authentication.

    • Requires the AuthenticationConfiguration member to be configured.

  • VIEW_VALIDATION_REDSHIFT - Designates a connection used for view validation by Amazon Redshift.

  • VIEW_VALIDATION_ATHENA - Designates a connection used for view validation by Amazon Athena.

  • NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

    NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.

  • MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.

    MARKETPLACE Connections use the following ConnectionParameters.

    • Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL.

    • Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.

  • CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

SFTP is not supported.

For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties.

For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.

" }, "MatchCriteria":{ "shape":"MatchCriteria", @@ -6325,7 +6705,15 @@ }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", - "documentation":"

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" + "documentation":"

The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" + }, + "AuthenticationConfiguration":{ + "shape":"AuthenticationConfigurationInput", + "documentation":"

The authentication properties of the connection. Used for a Salesforce connection.

" + }, + "ValidateCredentials":{ + "shape":"Boolean", + "documentation":"

A flag to validate the credentials during create connection. Used for a Salesforce connection. Default is true.

" } }, "documentation":"

A structure that is used to specify a connection to create or update.

" @@ -6401,7 +6789,20 @@ "KAFKA_SASL_GSSAPI_KEYTAB", "KAFKA_SASL_GSSAPI_KRB5_CONF", "KAFKA_SASL_GSSAPI_SERVICE", - "KAFKA_SASL_GSSAPI_PRINCIPAL" + "KAFKA_SASL_GSSAPI_PRINCIPAL", + "ROLE_ARN", + "REGION", + "WORKGROUP_NAME", + "CLUSTER_IDENTIFIER", + "DATABASE" + ] + }, + "ConnectionStatus":{ + "type":"string", + "enum":[ + "READY", + "IN_PROGRESS", + "FAILED" ] }, "ConnectionType":{ @@ -6413,7 +6814,10 @@ "KAFKA", "NETWORK", "MARKETPLACE", - "CUSTOM" + "CUSTOM", + "SALESFORCE", + "VIEW_VALIDATION_REDSHIFT", + "VIEW_VALIDATION_ATHENA" ] }, "ConnectionsList":{ @@ -6949,6 +7353,10 @@ "CreateConnectionResponse":{ "type":"structure", "members":{ + "CreateConnectionStatus":{ + "shape":"ConnectionStatus", + "documentation":"

The status of the connection creation request. The request can take some time for certain authentication types, for example when creating an OAuth connection with token exchange over VPC.

" + } } }, "CreateCrawlerRequest":{ @@ -7134,6 +7542,10 @@ "shape":"DataQualityTargetTable", "documentation":"

A target table associated with the data quality ruleset.

" }, + "DataQualitySecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the security configuration created with the data quality encryption option.

" + }, "ClientToken":{ "shape":"HashString", "documentation":"

Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource.

" @@ -7361,6 +7773,10 @@ "shape":"NameString", "documentation":"

The name you assign to this job definition. It must be unique in your account.

" }, + "JobMode":{ + "shape":"JobMode", + "documentation":"

A mode that describes how a job was created. Valid values are:

  • SCRIPT - The job was created using the Glue Studio script editor.

  • VISUAL - The job was created using the Glue Studio visual editor.

  • NOTEBOOK - The job was created using an interactive sessions notebook.

When the JobMode field is missing or null, SCRIPT is assigned as the default value.

" + }, "Description":{ "shape":"DescriptionString", "documentation":"

Description of the job being defined.

" @@ -7405,7 +7821,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -7446,6 +7862,10 @@ "SourceControlDetails":{ "shape":"SourceControlDetails", "documentation":"

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" + }, + "MaintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.

" } } }, @@ -8021,6 +8441,40 @@ } } }, + "CreateUsageProfileRequest":{ + "type":"structure", + "required":[ + "Name", + "Configuration" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the usage profile.

" + }, + "Configuration":{ + "shape":"ProfileConfiguration", + "documentation":"

A ProfileConfiguration object specifying the job and session values for the profile.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

A list of tags applied to the usage profile.

" + } + } + }, + "CreateUsageProfileResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile that was created.

" + } + } + }, "CreateUserDefinedFunctionRequest":{ "type":"structure", "required":[ @@ -8276,6 +8730,13 @@ "key":{"shape":"AdditionalOptionKeys"}, "value":{"shape":"GenericString"} }, + "DQCompositeRuleEvaluationMethod":{ + "type":"string", + "enum":[ + "COLUMN", + "ROW" + ] + }, "DQDLAliases":{ "type":"map", "key":{"shape":"NodeName"}, @@ -8416,6 +8877,10 @@ "ResultsS3Prefix":{ "shape":"UriString", "documentation":"

Prefix for Amazon S3 to store results.

" + }, + "CompositeRuleEvaluationMethod":{ + "shape":"DQCompositeRuleEvaluationMethod", + "documentation":"

Set the evaluation method for composite rules in the ruleset to ROW/COLUMN.

" } }, "documentation":"

Additional run options you can specify for an evaluation run.

" @@ -8442,6 +8907,14 @@ }, "documentation":"

Describes the data quality metric value according to the analysis of historical data.

" }, + "DataQualityModelStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED" + ] + }, "DataQualityObservation":{ "type":"structure", "members":{ @@ -8460,7 +8933,8 @@ "type":"string", "max":2048, "min":0, - "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "sensitive":true }, "DataQualityObservations":{ "type":"list", @@ -8475,6 +8949,10 @@ "shape":"HashString", "documentation":"

A unique result ID for the data quality result.

" }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID for the data quality result.

" + }, "Score":{ "shape":"GenericBoundedDouble", "documentation":"

An aggregate data quality score. Represents the ratio of rules that passed to the total number of rules.

" @@ -8665,6 +9143,10 @@ "EvaluatedMetrics":{ "shape":"EvaluatedMetricsMap", "documentation":"

A map of metrics associated with the evaluation of the rule.

" + }, + "EvaluatedRule":{ + "shape":"DataQualityRuleResultDescription", + "documentation":"

The evaluated rule.

" } }, "documentation":"

Describes the result of the evaluation of a data quality rule.

" @@ -8673,7 +9155,8 @@ "type":"string", "max":2048, "min":0, - "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "sensitive":true }, "DataQualityRuleResultStatus":{ "type":"string", @@ -8892,6 +9375,14 @@ }, "documentation":"

The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.

" }, + "DatabaseAttributes":{ + "type":"string", + "enum":["NAME"] + }, + "DatabaseAttributesList":{ + "type":"list", + "member":{"shape":"DatabaseAttributes"} + }, "DatabaseIdentifier":{ "type":"structure", "members":{ @@ -8950,6 +9441,34 @@ "member":{"shape":"Database"} }, "DatabaseName":{"type":"string"}, + "DatabrewCondition":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "DatabrewConditionValue":{ + "type":"string", + "max":1024 + }, + "DatapointInclusionAnnotation":{ + "type":"structure", + "members":{ + "ProfileId":{ + "shape":"HashString", + "documentation":"

The ID of the data quality profile the statistic belongs to.

" + }, + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "InclusionAnnotation":{ + "shape":"InclusionAnnotationValue", + "documentation":"

The inclusion annotation value to apply to the statistic.

" + } + }, + "documentation":"

An Inclusion Annotation.

" + }, "Datatype":{ "type":"structure", "required":[ @@ -9603,6 +10122,21 @@ } } }, + "DeleteUsageProfileRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile to delete.

" + } + } + }, + "DeleteUsageProfileResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteUserDefinedFunctionRequest":{ "type":"structure", "required":[ @@ -10172,11 +10706,11 @@ }, "EnclosedInStringProperty":{ "type":"string", - "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*" + "pattern":"([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*" }, "EnclosedInStringPropertyWithQuote":{ "type":"string", - "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*" + "pattern":"([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*" }, "EncryptionAtRest":{ "type":"structure", @@ -10343,7 +10877,8 @@ "EvaluatedMetricsMap":{ "type":"map", "key":{"shape":"NameString"}, - "value":{"shape":"NullableDouble"} + "value":{"shape":"NullableDouble"}, + "sensitive":true }, "EvaluationMetrics":{ "type":"structure", @@ -10473,10 +11008,15 @@ "FederationSourceErrorCode":{ "type":"string", "enum":[ + "AccessDeniedException", + "EntityNotFoundException", + "InvalidCredentialsException", + "InvalidInputException", "InvalidResponseException", "OperationTimeoutException", "OperationNotSupportedException", "InternalServiceException", + "PartialFailureException", "ThrottlingException" ] }, @@ -11251,6 +11791,71 @@ } } }, + "GetDataQualityModelRequest":{ + "type":"structure", + "required":["ProfileId"], + "members":{ + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + } + } + }, + "GetDataQualityModelResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DataQualityModelStatus", + "documentation":"

The training status of the data quality model.

" + }, + "StartedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the data quality model training started.

" + }, + "CompletedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the data quality model training completed.

" + }, + "FailureReason":{ + "shape":"HashString", + "documentation":"

The training failure reason.

" + } + } + }, + "GetDataQualityModelResultRequest":{ + "type":"structure", + "required":[ + "StatisticId", + "ProfileId" + ], + "members":{ + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + } + } + }, + "GetDataQualityModelResultResponse":{ + "type":"structure", + "members":{ + "CompletedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the data quality model training completed.

" + }, + "Model":{ + "shape":"StatisticModelResults", + "documentation":"

A list of StatisticModelResult objects.

" + } + } + }, "GetDataQualityResultRequest":{ "type":"structure", "required":["ResultId"], @@ -11268,6 +11873,10 @@ "shape":"HashString", "documentation":"

A unique result ID for the data quality result.

" }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID for the data quality result.

" + }, "Score":{ "shape":"GenericBoundedDouble", "documentation":"

An aggregate data quality score. Represents the ratio of rules that passed to the total number of rules.

" @@ -11382,6 +11991,10 @@ "CreatedRulesetName":{ "shape":"NameString", "documentation":"

The name of the ruleset that was created by the run.

" + }, + "DataQualitySecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the security configuration created with the data quality encryption option.

" } } }, @@ -11448,7 +12061,7 @@ }, "RulesetNames":{ "shape":"RulesetNames", - "documentation":"

A list of ruleset names for the run.

" + "documentation":"

A list of ruleset names for the run. Currently, this parameter takes only one Ruleset name.

" }, "ResultIds":{ "shape":"DataQualityResultIdList", @@ -11500,6 +12113,10 @@ "RecommendationRunId":{ "shape":"HashString", "documentation":"

When a ruleset was created from a recommendation run, this run ID is generated to link the two together.

" + }, + "DataQualitySecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the security configuration created with the data quality encryption option.

" } } }, @@ -11544,6 +12161,10 @@ "ResourceShareType":{ "shape":"ResourceShareType", "documentation":"

Allows you to specify that you want to list the databases shared with your account. The allowable values are FEDERATED, FOREIGN or ALL.

  • If set to FEDERATED, will list the federated databases (referencing an external entity) shared with your account.

  • If set to FOREIGN, will list the databases shared with your account.

  • If set to ALL, will list the databases shared with your account, as well as the databases in your local account.

" + }, + "AttributesToGet":{ + "shape":"DatabaseAttributesList", + "documentation":"

Specifies the database fields returned by the GetDatabases call. This parameter doesn’t accept an empty list. The request must include the NAME.

" } } }, @@ -12659,6 +13280,10 @@ "QueryAsOfTime":{ "shape":"Timestamp", "documentation":"

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.

" + }, + "IncludeStatusDetails":{ + "shape":"BooleanNullable", + "documentation":"

Specifies whether to include status details related to a request to create or update a Glue Data Catalog view.

" } } }, @@ -12782,6 +13407,14 @@ "QueryAsOfTime":{ "shape":"Timestamp", "documentation":"

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.

" + }, + "IncludeStatusDetails":{ + "shape":"BooleanNullable", + "documentation":"

Specifies whether to include status details related to a request to create or update a Glue Data Catalog view.

" + }, + "AttributesToGet":{ + "shape":"TableAttributesList", + "documentation":"

Specifies the table fields returned by the GetTables call. This parameter doesn’t accept an empty list. The request must include NAME.

The following are the valid combinations of values:

  • NAME - Names of all tables in the database.

  • NAME, TABLE_TYPE - Names of all tables and the table types.

" } } }, @@ -13095,6 +13728,41 @@ } } }, + "GetUsageProfileRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile to retrieve.

" + } + } + }, + "GetUsageProfileResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the usage profile.

" + }, + "Configuration":{ + "shape":"ProfileConfiguration", + "documentation":"

A ProfileConfiguration object specifying the job and session values for the profile.

" + }, + "CreatedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time when the usage profile was created.

" + }, + "LastModifiedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time when the usage profile was last modified.

" + } + } + }, "GetUserDefinedFunctionRequest":{ "type":"structure", "required":[ @@ -13704,6 +14372,17 @@ }, "documentation":"

Specifies configuration properties for an importing labels task run.

" }, + "InclusionAnnotationList":{ + "type":"list", + "member":{"shape":"DatapointInclusionAnnotation"} + }, + "InclusionAnnotationValue":{ + "type":"string", + "enum":[ + "INCLUDE", + "EXCLUDE" + ] + }, "Integer":{"type":"integer"}, "IntegerFlag":{ "type":"integer", @@ -13981,6 +14660,10 @@ "shape":"NameString", "documentation":"

The name you assign to this job definition.

" }, + "JobMode":{ + "shape":"JobMode", + "documentation":"

A mode that describes how a job was created. Valid values are:

  • SCRIPT - The job was created using the Glue Studio script editor.

  • VISUAL - The job was created using the Glue Studio visual editor.

  • NOTEBOOK - The job was created using an interactive sessions notebook.

When the JobMode field is missing or null, SCRIPT is assigned as the default value.

" + }, "Description":{ "shape":"DescriptionString", "documentation":"

A description of the job.

" @@ -14033,7 +14716,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -14070,6 +14753,14 @@ "SourceControlDetails":{ "shape":"SourceControlDetails", "documentation":"

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" + }, + "MaintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.

" + }, + "ProfileName":{ + "shape":"NameString", + "documentation":"

The name of a Glue usage profile associated with the job.

" } }, "documentation":"

Specifies a job definition.

" @@ -14155,6 +14846,14 @@ "type":"list", "member":{"shape":"Job"} }, + "JobMode":{ + "type":"string", + "enum":[ + "SCRIPT", + "VISUAL", + "NOTEBOOK" + ] + }, "JobName":{"type":"string"}, "JobNameList":{ "type":"list", @@ -14193,6 +14892,10 @@ "shape":"NameString", "documentation":"

The name of the job definition being used in this run.

" }, + "JobMode":{ + "shape":"JobMode", + "documentation":"

A mode that describes how a job was created. Valid values are:

  • SCRIPT - The job was created using the Glue Studio script editor.

  • VISUAL - The job was created using the Glue Studio visual editor.

  • NOTEBOOK - The job was created using an interactive sessions notebook.

When the JobMode field is missing or null, SCRIPT is assigned as the default value.

" + }, "StartedOn":{ "shape":"TimestampValue", "documentation":"

The date and time at which this job run was started.

" @@ -14233,7 +14936,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

" + "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -14265,11 +14968,19 @@ }, "DPUSeconds":{ "shape":"NullableDouble", - "documentation":"

This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

" + "documentation":"

This field can be set for either job runs with execution class FLEX or when Auto Scaling is enabled, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

" }, "ExecutionClass":{ "shape":"ExecutionClass", "documentation":"

Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.

The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.

Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.

" + }, + "MaintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.

" + }, + "ProfileName":{ + "shape":"NameString", + "documentation":"

The name of a Glue usage profile associated with the job run.

" } }, "documentation":"

Contains information about a job run.

" @@ -14289,12 +15000,17 @@ "FAILED", "TIMEOUT", "ERROR", - "WAITING" + "WAITING", + "EXPIRED" ] }, "JobUpdate":{ "type":"structure", "members":{ + "JobMode":{ + "shape":"JobMode", + "documentation":"

A mode that describes how a job was created. Valid values are:

  • SCRIPT - The job was created using the Glue Studio script editor.

  • VISUAL - The job was created using the Glue Studio visual editor.

  • NOTEBOOK - The job was created using an interactive sessions notebook.

When the JobMode field is missing or null, SCRIPT is assigned as the default value.

" + }, "Description":{ "shape":"DescriptionString", "documentation":"

Description of the job being defined.

" @@ -14339,7 +15055,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -14376,6 +15092,10 @@ "SourceControlDetails":{ "shape":"SourceControlDetails", "documentation":"

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" + }, + "MaintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.

" } }, "documentation":"

Specifies information used to update an existing job definition. The previous job definition is completely overwritten by this information.

" @@ -15076,6 +15796,82 @@ } } }, + "ListDataQualityStatisticAnnotationsRequest":{ + "type":"structure", + "members":{ + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + }, + "TimestampFilter":{ + "shape":"TimestampFilter", + "documentation":"

A timestamp filter.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return in this request.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

A pagination token to retrieve the next set of results.

" + } + } + }, + "ListDataQualityStatisticAnnotationsResponse":{ + "type":"structure", + "members":{ + "Annotations":{ + "shape":"AnnotationList", + "documentation":"

A list of StatisticAnnotation applied to the Statistic

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

A pagination token to retrieve the next set of results.

" + } + } + }, + "ListDataQualityStatisticsRequest":{ + "type":"structure", + "members":{ + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + }, + "TimestampFilter":{ + "shape":"TimestampFilter", + "documentation":"

A timestamp filter.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return in this request.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

A pagination token to request the next page of results.

" + } + } + }, + "ListDataQualityStatisticsResponse":{ + "type":"structure", + "members":{ + "Statistics":{ + "shape":"StatisticSummaryList", + "documentation":"

A StatisticSummaryList.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

A pagination token to request the next page of results.

" + } + } + }, "ListDevEndpointsRequest":{ "type":"structure", "members":{ @@ -15426,6 +16222,32 @@ } } }, + "ListUsageProfilesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"OrchestrationToken", + "documentation":"

A continuation token, included if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"OrchestrationPageSize200", + "documentation":"

The maximum number of usage profiles to return in a single response.

" + } + } + }, + "ListUsageProfilesResponse":{ + "type":"structure", + "members":{ + "Profiles":{ + "shape":"UsageProfileDefinitionList", + "documentation":"

A list of usage profile (UsageProfileDefinition) objects.

" + }, + "NextToken":{ + "shape":"OrchestrationToken", + "documentation":"

A continuation token, present if the current list segment is not the last.

" + } + } + }, "ListWorkflowsRequest":{ "type":"structure", "members":{ @@ -15535,6 +16357,11 @@ "documentation":"

Defines column statistics supported for integer data columns.

" }, "LongValue":{"type":"long"}, + "LongValueString":{ + "type":"string", + "max":16384, + "min":1 + }, "MLTransform":{ "type":"structure", "members":{ @@ -15650,6 +16477,10 @@ "SSE-KMS" ] }, + "MaintenanceWindow":{ + "type":"string", + "pattern":"^(Sun|Mon|Tue|Wed|Thu|Fri|Sat):([01]?[0-9]|2[0-3])$" + }, "ManyInputs":{ "type":"list", "member":{"shape":"NodeId"}, @@ -15850,6 +16681,10 @@ "shape":"NameString", "documentation":"

The name of the data quality metric used for generating the observation.

" }, + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, "MetricValues":{ "shape":"DataQualityMetricValues", "documentation":"

An object of type DataQualityMetricValues representing the analysis of the data quality metric value.

" @@ -16055,7 +16890,7 @@ }, "NodeName":{ "type":"string", - "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*" + "pattern":"([^\\r\\n])*" }, "NodeType":{ "type":"string", @@ -16154,6 +16989,76 @@ "type":"string", "box":true }, + "OAuth2ClientApplication":{ + "type":"structure", + "members":{ + "UserManagedClientApplicationClientId":{ + "shape":"UserManagedClientApplicationClientId", + "documentation":"

The client application clientID if the ClientAppType is USER_MANAGED.

" + }, + "AWSManagedClientApplicationReference":{ + "shape":"AWSManagedClientApplicationReference", + "documentation":"

The reference to the SaaS-side client app that is Amazon Web Services managed.

" + } + }, + "documentation":"

The OAuth2 client app used for the connection.

" + }, + "OAuth2GrantType":{ + "type":"string", + "enum":[ + "AUTHORIZATION_CODE", + "CLIENT_CREDENTIALS", + "JWT_BEARER" + ] + }, + "OAuth2Properties":{ + "type":"structure", + "members":{ + "OAuth2GrantType":{ + "shape":"OAuth2GrantType", + "documentation":"

The OAuth2 grant type. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS.

" + }, + "OAuth2ClientApplication":{ + "shape":"OAuth2ClientApplication", + "documentation":"

The client application type. For example, AWS_MANAGED or USER_MANAGED.

" + }, + "TokenUrl":{ + "shape":"TokenUrl", + "documentation":"

The URL of the provider's authentication server, to exchange an authorization code for an access token.

" + }, + "TokenUrlParametersMap":{ + "shape":"TokenUrlParametersMap", + "documentation":"

A map of parameters that are added to the token GET request.

" + } + }, + "documentation":"

A structure containing properties for OAuth2 authentication.

" + }, + "OAuth2PropertiesInput":{ + "type":"structure", + "members":{ + "OAuth2GrantType":{ + "shape":"OAuth2GrantType", + "documentation":"

The OAuth2 grant type in the CreateConnection request. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS.

" + }, + "OAuth2ClientApplication":{ + "shape":"OAuth2ClientApplication", + "documentation":"

The client application type in the CreateConnection request. For example, AWS_MANAGED or USER_MANAGED.

" + }, + "TokenUrl":{ + "shape":"TokenUrl", + "documentation":"

The URL of the provider's authentication server, to exchange an authorization code for an access token.

" + }, + "TokenUrlParametersMap":{ + "shape":"TokenUrlParametersMap", + "documentation":"

A map of parameters that are added to the token GET request.

" + }, + "AuthorizationCodeProperties":{ + "shape":"AuthorizationCodeProperties", + "documentation":"

The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type.

" + } + }, + "documentation":"

A structure containing properties for OAuth2 in the CreateConnection request.

" + }, "OneInput":{ "type":"list", "member":{"shape":"NodeId"}, @@ -16170,8 +17075,25 @@ }, "documentation":"

A structure representing an open format table.

" }, - "OperationTimeoutException":{ - "type":"structure", + "Operation":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "OperationNotSupportedException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The operation is not available in the region.

", + "exception":true + }, + "OperationTimeoutException":{ + "type":"structure", "members":{ "Message":{ "shape":"MessageString", @@ -16418,6 +17340,22 @@ "null" ] }, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9]+$" + }, + "ParameterValue":{ + "type":"string", + "max":32768, + "min":1 + }, "ParametersMap":{ "type":"map", "key":{"shape":"KeyString"}, @@ -16669,10 +17607,10 @@ }, "AvailabilityZone":{ "shape":"NameString", - "documentation":"

The connection's Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.

" + "documentation":"

The connection's Availability Zone.

" } }, - "documentation":"

Specifies the physical requirements for a connection.

" + "documentation":"

Specifies the physical requirements for a connection.

" }, "PiiType":{ "type":"string", @@ -16822,6 +17760,20 @@ "max":1, "min":0 }, + "ProfileConfiguration":{ + "type":"structure", + "members":{ + "SessionConfiguration":{ + "shape":"ConfigurationMap", + "documentation":"

A key-value map of configuration parameters for Glue sessions.

" + }, + "JobConfiguration":{ + "shape":"ConfigurationMap", + "documentation":"

A key-value map of configuration parameters for Glue jobs.

" + } + }, + "documentation":"

Specifies the job and session values that an admin configures in an Glue usage profile.

" + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -16864,6 +17816,29 @@ "members":{ } }, + "PutDataQualityProfileAnnotationRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "InclusionAnnotation" + ], + "members":{ + "ProfileId":{ + "shape":"HashString", + "documentation":"

The ID of the data quality monitoring profile to annotate.

" + }, + "InclusionAnnotation":{ + "shape":"InclusionAnnotationValue", + "documentation":"

The inclusion annotation value to apply to the profile.

" + } + } + }, + "PutDataQualityProfileAnnotationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Left blank.

" + }, "PutResourcePolicyRequest":{ "type":"structure", "required":["PolicyInJson"], @@ -17080,8 +18055,7 @@ "type":"structure", "required":[ "Name", - "Inputs", - "RecipeReference" + "Inputs" ], "members":{ "Name":{ @@ -17095,10 +18069,29 @@ "RecipeReference":{ "shape":"RecipeReference", "documentation":"

A reference to the DataBrew recipe used by the node.

" + }, + "RecipeSteps":{ + "shape":"RecipeSteps", + "documentation":"

Transform steps used in the recipe node.

" } }, "documentation":"

A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.

" }, + "RecipeAction":{ + "type":"structure", + "required":["Operation"], + "members":{ + "Operation":{ + "shape":"Operation", + "documentation":"

The operation of the recipe action.

" + }, + "Parameters":{ + "shape":"ParameterMap", + "documentation":"

The parameters of the recipe action.

" + } + }, + "documentation":"

Actions defined in the Glue Studio data preparation recipe node.

" + }, "RecipeReference":{ "type":"structure", "required":[ @@ -17117,6 +18110,25 @@ }, "documentation":"

A reference to a Glue DataBrew recipe.

" }, + "RecipeStep":{ + "type":"structure", + "required":["Action"], + "members":{ + "Action":{ + "shape":"RecipeAction", + "documentation":"

The transformation action of the recipe step.

" + }, + "ConditionExpressions":{ + "shape":"ConditionExpressionList", + "documentation":"

The condition expressions for the recipe step.

" + } + }, + "documentation":"

A recipe step used in a Glue Studio data preparation recipe node.

" + }, + "RecipeSteps":{ + "type":"list", + "member":{"shape":"RecipeStep"} + }, "RecipeVersion":{ "type":"string", "max":16, @@ -17144,6 +18156,11 @@ }, "documentation":"

When crawling an Amazon S3 data source after the first crawl is complete, specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. For more information, see Incremental Crawls in Glue in the developer guide.

" }, + "RedirectUri":{ + "type":"string", + "max":512, + "pattern":"^(https?):\\/\\/[^\\s/$.?#].[^\\s]*$" + }, "RedshiftSource":{ "type":"structure", "required":[ @@ -17215,6 +18232,10 @@ }, "documentation":"

Specifies a target that uses Amazon Redshift.

" }, + "ReferenceDatasetsList":{ + "type":"list", + "member":{"shape":"NameString"} + }, "RegisterSchemaVersionInput":{ "type":"structure", "required":[ @@ -17438,6 +18459,13 @@ } } }, + "ResourceAction":{ + "type":"string", + "enum":[ + "UPDATE", + "CREATE" + ] + }, "ResourceNotReadyException":{ "type":"structure", "members":{ @@ -17468,6 +18496,16 @@ "FEDERATED" ] }, + "ResourceState":{ + "type":"string", + "enum":[ + "QUEUED", + "IN_PROGRESS", + "SUCCESS", + "STOPPED", + "FAILED" + ] + }, "ResourceType":{ "type":"string", "enum":[ @@ -17545,6 +18583,20 @@ "min":1 }, "RunId":{"type":"string"}, + "RunIdentifier":{ + "type":"structure", + "members":{ + "RunId":{ + "shape":"HashString", + "documentation":"

The Run ID.

" + }, + "JobRunId":{ + "shape":"HashString", + "documentation":"

The Job Run ID.

" + } + }, + "documentation":"

A run identifier.

" + }, "RunMetrics":{ "type":"structure", "members":{ @@ -18659,6 +19711,10 @@ "ResourceShareType":{ "shape":"ResourceShareType", "documentation":"

Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN or ALL.

  • If set to FOREIGN, will search the tables shared with your account.

  • If set to ALL, will search the tables shared with your account, as well as the tables in your local account.

" + }, + "IncludeStatusDetails":{ + "shape":"BooleanNullable", + "documentation":"

Specifies whether to include status details related to a request to create or update an Glue Data Catalog view.

" } } }, @@ -18675,6 +19731,10 @@ } } }, + "SecretArn":{ + "type":"string", + "pattern":"^arn:aws(-(cn|us-gov|iso(-[bef])?))?:secretsmanager:.*$" + }, "SecurityConfiguration":{ "type":"structure", "members":{ @@ -18873,6 +19933,10 @@ "IdleTimeout":{ "shape":"IdleTimeout", "documentation":"

The number of minutes when idle before the session times out.

" + }, + "ProfileName":{ + "shape":"NameString", + "documentation":"

The name of an Glue usage profile associated with the session.

" } }, "documentation":"

The period in which a remote Spark runtime environment is running.

" @@ -19473,6 +20537,10 @@ "shape":"NameString", "documentation":"

A name for the ruleset.

" }, + "DataQualitySecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the security configuration created with the data quality encryption option.

" + }, "ClientToken":{ "shape":"HashString", "documentation":"

Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource.

" @@ -19619,7 +20687,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

" + "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, the job will be restarted during the maintenance window after 7 days.

" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -19855,6 +20923,151 @@ "ERROR" ] }, + "StatisticAnnotation":{ + "type":"structure", + "members":{ + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + }, + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "StatisticRecordedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the annotated statistic was recorded.

" + }, + "InclusionAnnotation":{ + "shape":"TimestampedInclusionAnnotation", + "documentation":"

The inclusion annotation applied to the statistic.

" + } + }, + "documentation":"

A Statistic Annotation.

" + }, + "StatisticEvaluationLevel":{ + "type":"string", + "enum":[ + "Dataset", + "Column", + "Multicolumn" + ] + }, + "StatisticModelResult":{ + "type":"structure", + "members":{ + "LowerBound":{ + "shape":"NullableDouble", + "documentation":"

The lower bound.

" + }, + "UpperBound":{ + "shape":"NullableDouble", + "documentation":"

The upper bound.

" + }, + "PredictedValue":{ + "shape":"NullableDouble", + "documentation":"

The predicted value.

" + }, + "ActualValue":{ + "shape":"NullableDouble", + "documentation":"

The actual value.

" + }, + "Date":{ + "shape":"Timestamp", + "documentation":"

The date.

" + }, + "InclusionAnnotation":{ + "shape":"InclusionAnnotationValue", + "documentation":"

The inclusion annotation.

" + } + }, + "documentation":"

The statistic model result.

" + }, + "StatisticModelResults":{ + "type":"list", + "member":{"shape":"StatisticModelResult"} + }, + "StatisticNameString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Z][A-Za-z\\.]+" + }, + "StatisticPropertiesMap":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"DescriptionString"}, + "sensitive":true + }, + "StatisticSummary":{ + "type":"structure", + "members":{ + "StatisticId":{ + "shape":"HashString", + "documentation":"

The Statistic ID.

" + }, + "ProfileId":{ + "shape":"HashString", + "documentation":"

The Profile ID.

" + }, + "RunIdentifier":{ + "shape":"RunIdentifier", + "documentation":"

The Run Identifier

" + }, + "StatisticName":{ + "shape":"StatisticNameString", + "documentation":"

The name of the statistic.

" + }, + "DoubleValue":{ + "shape":"double", + "documentation":"

The value of the statistic.

" + }, + "EvaluationLevel":{ + "shape":"StatisticEvaluationLevel", + "documentation":"

The evaluation level of the statistic. Possible values: Dataset, Column, Multicolumn.

" + }, + "ColumnsReferenced":{ + "shape":"ColumnNameList", + "documentation":"

The list of columns referenced by the statistic.

" + }, + "ReferencedDatasets":{ + "shape":"ReferenceDatasetsList", + "documentation":"

The list of datasets referenced by the statistic.

" + }, + "StatisticProperties":{ + "shape":"StatisticPropertiesMap", + "documentation":"

A StatisticPropertiesMap, which contains a NameString and DescriptionString

" + }, + "RecordedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the statistic was recorded.

" + }, + "InclusionAnnotation":{ + "shape":"TimestampedInclusionAnnotation", + "documentation":"

The inclusion annotation for the statistic.

" + } + }, + "documentation":"

Summary information about a statistic.

" + }, + "StatisticSummaryList":{ + "type":"list", + "member":{"shape":"StatisticSummary"}, + "documentation":"

A list of StatisticSummary.

" + }, + "StatusDetails":{ + "type":"structure", + "members":{ + "RequestedChange":{ + "shape":"Table", + "documentation":"

A Table object representing the requested changes.

" + }, + "ViewValidations":{ + "shape":"ViewValidationList", + "documentation":"

A list of ViewValidation objects that contain information for an analytical engine to validate a view.

" + } + }, + "documentation":"

A structure containing information about an asynchronous change to a table.

" + }, "StopColumnStatisticsTaskRunRequest":{ "type":"structure", "required":[ @@ -20188,10 +21401,22 @@ "IsMultiDialectView":{ "shape":"NullableBoolean", "documentation":"

Specifies whether the view supports the SQL dialects of one or more different query engines and can therefore be read by those engines.

" - } + }, + "Status":{"shape":"TableStatus"} }, "documentation":"

Represents a collection of related data organized in columns and rows.

" }, + "TableAttributes":{ + "type":"string", + "enum":[ + "NAME", + "TABLE_TYPE" + ] + }, + "TableAttributesList":{ + "type":"list", + "member":{"shape":"TableAttributes"} + }, "TableError":{ "type":"structure", "members":{ @@ -20287,6 +21512,10 @@ "TargetTable":{ "shape":"TableIdentifier", "documentation":"

A TableIdentifier structure that describes a target table for resource linking.

" + }, + "ViewDefinition":{ + "shape":"ViewDefinitionInput", + "documentation":"

A structure that contains all the information that defines the view, including the dialect or dialects for the view, and the query.

" } }, "documentation":"

A structure used to define a table.

" @@ -20377,6 +21606,44 @@ "max":128, "min":0 }, + "TableStatus":{ + "type":"structure", + "members":{ + "RequestedBy":{ + "shape":"NameString", + "documentation":"

The ARN of the user who requested the asynchronous change.

" + }, + "UpdatedBy":{ + "shape":"NameString", + "documentation":"

The ARN of the user to last manually alter the asynchronous change (requesting cancellation, etc).

" + }, + "RequestTime":{ + "shape":"Timestamp", + "documentation":"

An ISO 8601 formatted date string indicating the time that the change was initiated.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

An ISO 8601 formatted date string indicating the time that the state was last updated.

" + }, + "Action":{ + "shape":"ResourceAction", + "documentation":"

Indicates which action was called on the table, currently only CREATE or UPDATE.

" + }, + "State":{ + "shape":"ResourceState", + "documentation":"

A generic status for the change in progress, such as QUEUED, IN_PROGRESS, SUCCESS, or FAILED.

" + }, + "Error":{ + "shape":"ErrorDetail", + "documentation":"

An error that will only appear when the state is \"FAILED\". This is a parent level exception message, there may be different Errors for each dialect.

" + }, + "Details":{ + "shape":"StatusDetails", + "documentation":"

A StatusDetails object with information about the requested change.

" + } + }, + "documentation":"

A structure containing information about the state of an asynchronous change to a table.

" + }, "TableTypeString":{ "type":"string", "max":255 @@ -20462,6 +21729,11 @@ "max":50, "min":0 }, + "TargetColumn":{ + "type":"string", + "max":1024, + "min":1 + }, "TargetFormat":{ "type":"string", "enum":[ @@ -20626,8 +21898,56 @@ "min":1 }, "Timestamp":{"type":"timestamp"}, + "TimestampFilter":{ + "type":"structure", + "members":{ + "RecordedBefore":{ + "shape":"Timestamp", + "documentation":"

The timestamp before which statistics should be included in the results.

" + }, + "RecordedAfter":{ + "shape":"Timestamp", + "documentation":"

The timestamp after which statistics should be included in the results.

" + } + }, + "documentation":"

A timestamp filter.

" + }, "TimestampValue":{"type":"timestamp"}, + "TimestampedInclusionAnnotation":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"InclusionAnnotationValue", + "documentation":"

The inclusion annotation value.

" + }, + "LastModifiedOn":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the inclusion annotation was last modified.

" + } + }, + "documentation":"

A timestamped inclusion annotation.

" + }, "Token":{"type":"string"}, + "TokenUrl":{ + "type":"string", + "max":256, + "pattern":"^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]" + }, + "TokenUrlParameterKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TokenUrlParameterValue":{ + "type":"string", + "max":512, + "min":1 + }, + "TokenUrlParametersMap":{ + "type":"map", + "key":{"shape":"TokenUrlParameterKey"}, + "value":{"shape":"TokenUrlParameterValue"} + }, "Topk":{ "type":"integer", "box":true, @@ -21828,6 +23148,14 @@ "VersionId":{ "shape":"VersionString", "documentation":"

The version ID at which to update the table contents.

" + }, + "ViewUpdateAction":{ + "shape":"ViewUpdateAction", + "documentation":"

The operation to be performed when updating the view.

" + }, + "Force":{ + "shape":"Boolean", + "documentation":"

A flag that can be set to true to ignore matching storage descriptor and subobject matching requirements.

" } } }, @@ -21862,6 +23190,36 @@ } } }, + "UpdateUsageProfileRequest":{ + "type":"structure", + "required":[ + "Name", + "Configuration" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the usage profile.

" + }, + "Configuration":{ + "shape":"ProfileConfiguration", + "documentation":"

A ProfileConfiguration object specifying the job and session values for the profile.

" + } + } + }, + "UpdateUsageProfileResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile that was updated.

" + } + } + }, "UpdateUserDefinedFunctionRequest":{ "type":"structure", "required":[ @@ -21963,6 +23321,32 @@ "documentation":"

The options to configure an upsert operation when writing to a Redshift target .

" }, "UriString":{"type":"string"}, + "UsageProfileDefinition":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the usage profile.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the usage profile.

" + }, + "CreatedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time when the usage profile was created.

" + }, + "LastModifiedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time when the usage profile was last modified.

" + } + }, + "documentation":"

Describes an Glue usage profile.

" + }, + "UsageProfileDefinitionList":{ + "type":"list", + "member":{"shape":"UsageProfileDefinition"} + }, "UserDefinedFunction":{ "type":"structure", "members":{ @@ -22031,6 +23415,11 @@ "type":"list", "member":{"shape":"UserDefinedFunction"} }, + "UserManagedClientApplicationClientId":{ + "type":"string", + "max":2048, + "pattern":"\\S+" + }, "ValidationException":{ "type":"structure", "members":{ @@ -22101,6 +23490,28 @@ }, "documentation":"

A structure containing details for representations.

" }, + "ViewDefinitionInput":{ + "type":"structure", + "members":{ + "IsProtected":{ + "shape":"NullableBoolean", + "documentation":"

You can set this flag as true to instruct the engine not to push user-provided operations into the logical plan of the view during query planning. However, setting this flag does not guarantee that the engine will comply. Refer to the engine's documentation to understand the guarantees provided, if any.

" + }, + "Definer":{ + "shape":"ArnString", + "documentation":"

The definer of a view in SQL.

" + }, + "Representations":{ + "shape":"ViewRepresentationInputList", + "documentation":"

A list of structures that contains the dialect of the view, and the query that defines the view.

" + }, + "SubObjects":{ + "shape":"ViewSubObjectsList", + "documentation":"

A list of base table ARNs that make up the view.

" + } + }, + "documentation":"

A structure containing details for creating or updating an Glue view.

" + }, "ViewDialect":{ "type":"string", "enum":[ @@ -22131,7 +23542,11 @@ }, "ViewExpandedText":{ "shape":"ViewTextString", - "documentation":"

The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example:

  • Fully qualify identifiers: SELECT * from table1 → SELECT * from db1.table1

" + "documentation":"

The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example:

  • Fully qualified identifiers: SELECT * from table1 -> SELECT * from db1.table1

" + }, + "ValidationConnection":{ + "shape":"NameString", + "documentation":"

The name of the connection to be used to validate the specific representation of the view.

" }, "IsStale":{ "shape":"NullableBoolean", @@ -22140,6 +23555,38 @@ }, "documentation":"

A structure that contains the dialect of the view, and the query that defines the view.

" }, + "ViewRepresentationInput":{ + "type":"structure", + "members":{ + "Dialect":{ + "shape":"ViewDialect", + "documentation":"

A parameter that specifies the engine type of a specific representation.

" + }, + "DialectVersion":{ + "shape":"ViewDialectVersionString", + "documentation":"

A parameter that specifies the version of the engine of a specific representation.

" + }, + "ViewOriginalText":{ + "shape":"ViewTextString", + "documentation":"

A string that represents the original SQL query that describes the view.

" + }, + "ValidationConnection":{ + "shape":"NameString", + "documentation":"

The name of the connection to be used to validate the specific representation of the view.

" + }, + "ViewExpandedText":{ + "shape":"ViewTextString", + "documentation":"

A string that represents the SQL query that describes the view with expanded resource ARNs

" + } + }, + "documentation":"

A structure containing details of a representation to update or create a Lake Formation view.

" + }, + "ViewRepresentationInputList":{ + "type":"list", + "member":{"shape":"ViewRepresentationInput"}, + "max":10, + "min":1 + }, "ViewRepresentationList":{ "type":"list", "member":{"shape":"ViewRepresentation"}, @@ -22156,6 +23603,49 @@ "type":"string", "max":409600 }, + "ViewUpdateAction":{ + "type":"string", + "enum":[ + "ADD", + "REPLACE", + "ADD_OR_REPLACE", + "DROP" + ] + }, + "ViewValidation":{ + "type":"structure", + "members":{ + "Dialect":{ + "shape":"ViewDialect", + "documentation":"

The dialect of the query engine.

" + }, + "DialectVersion":{ + "shape":"ViewDialectVersionString", + "documentation":"

The version of the dialect of the query engine. For example, 3.0.0.

" + }, + "ViewValidationText":{ + "shape":"ViewTextString", + "documentation":"

The SELECT query that defines the view, as provided by the customer.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The time of the last update.

" + }, + "State":{ + "shape":"ResourceState", + "documentation":"

The state of the validation.

" + }, + "Error":{ + "shape":"ErrorDetail", + "documentation":"

An error associated with the validation.

" + } + }, + "documentation":"

A structure that contains information for an analytical engine to validate a view, prior to persisting the view metadata. Used in the case of direct UpdateTable or CreateTable API calls.

" + }, + "ViewValidationList":{ + "type":"list", + "member":{"shape":"ViewValidation"} + }, "WorkerType":{ "type":"string", "enum":[ @@ -22383,6 +23873,7 @@ "type":"string", "min":1 }, + "double":{"type":"double"}, "tableNameString":{ "type":"string", "min":1 diff --git a/botocore/data/grafana/2020-08-18/paginators-1.json b/botocore/data/grafana/2020-08-18/paginators-1.json index e9ad47952e..55d05f2996 100644 --- a/botocore/data/grafana/2020-08-18/paginators-1.json +++ b/botocore/data/grafana/2020-08-18/paginators-1.json @@ -17,6 +17,18 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "grafanaVersions" + }, + "ListWorkspaceServiceAccountTokens": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "serviceAccountTokens" + }, + "ListWorkspaceServiceAccounts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "serviceAccounts" } } } diff --git a/botocore/data/grafana/2020-08-18/paginators-1.sdk-extras.json b/botocore/data/grafana/2020-08-18/paginators-1.sdk-extras.json new file mode 100644 index 0000000000..421e2f65a8 --- /dev/null +++ b/botocore/data/grafana/2020-08-18/paginators-1.sdk-extras.json @@ -0,0 +1,18 @@ +{ + "version": 1.0, + "merge": { + "pagination": { + "ListWorkspaceServiceAccounts": { + "non_aggregate_keys": [ + "workspaceId" + ] + }, + "ListWorkspaceServiceAccountTokens": { + "non_aggregate_keys": [ + "serviceAccountId", + "workspaceId" + ] + } + } + } +} diff --git a/botocore/data/grafana/2020-08-18/service-2.json b/botocore/data/grafana/2020-08-18/service-2.json index 336142eee5..738bc1099c 100644 --- a/botocore/data/grafana/2020-08-18/service-2.json +++ b/botocore/data/grafana/2020-08-18/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"grafana", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Managed Grafana", "serviceId":"grafana", "signatureVersion":"v4", @@ 
-28,7 +29,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Assigns a Grafana Enterprise license to a workspace. Upgrading to Grafana Enterprise incurs additional fees. For more information, see Upgrade a workspace to Grafana Enterprise.

" + "documentation":"

Assigns a Grafana Enterprise license to a workspace. To upgrade, you must use ENTERPRISE for the licenseType, and pass in a valid Grafana Labs token for the grafanaToken. Upgrading to Grafana Enterprise incurs additional fees. For more information, see Upgrade a workspace to Grafana Enterprise.

" }, "CreateWorkspace":{ "name":"CreateWorkspace", @@ -68,7 +69,47 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a Grafana API key for the workspace. This key can be used to authenticate requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html for available APIs and example requests.

" + "documentation":"

Creates a Grafana API key for the workspace. This key can be used to authenticate requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html for available APIs and example requests.

In workspaces compatible with Grafana version 9 or above, use workspace service accounts instead of API keys. API keys will be removed in a future release.

" + }, + "CreateWorkspaceServiceAccount":{ + "name":"CreateWorkspaceServiceAccount", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts", + "responseCode":200 + }, + "input":{"shape":"CreateWorkspaceServiceAccountRequest"}, + "output":{"shape":"CreateWorkspaceServiceAccountResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a service account for the workspace. A service account can be used to call Grafana HTTP APIs, and run automated workloads. After creating the service account with the correct GrafanaRole for your use case, use CreateWorkspaceServiceAccountToken to create a token that can be used to authenticate and authorize Grafana HTTP API calls.

You can only create service accounts for workspaces that are compatible with Grafana version 9 and above.

For more information about service accounts, see Service accounts in the Amazon Managed Grafana User Guide.

For more information about the Grafana HTTP APIs, see Using Grafana HTTP APIs in the Amazon Managed Grafana User Guide.

" + }, + "CreateWorkspaceServiceAccountToken":{ + "name":"CreateWorkspaceServiceAccountToken", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens", + "responseCode":200 + }, + "input":{"shape":"CreateWorkspaceServiceAccountTokenRequest"}, + "output":{"shape":"CreateWorkspaceServiceAccountTokenResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a token that can be used to authenticate and authorize Grafana HTTP API operations for the given workspace service account. The service account acts as a user for the API operations, and defines the permissions that are used by the API.

When you create the service account token, you will receive a key that is used when calling Grafana APIs. Do not lose this key, as it will not be retrievable again.

If you do lose the key, you can delete the token and recreate it to receive a new key. This will disable the initial key.

Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.

" }, "DeleteWorkspace":{ "name":"DeleteWorkspace", @@ -107,7 +148,45 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a Grafana API key for the workspace.

" + "documentation":"

Deletes a Grafana API key for the workspace.

In workspaces compatible with Grafana version 9 or above, use workspace service accounts instead of API keys. API keys will be removed in a future release.

" + }, + "DeleteWorkspaceServiceAccount":{ + "name":"DeleteWorkspaceServiceAccount", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}", + "responseCode":200 + }, + "input":{"shape":"DeleteWorkspaceServiceAccountRequest"}, + "output":{"shape":"DeleteWorkspaceServiceAccountResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a workspace service account from the workspace.

This will delete any tokens created for the service account, as well. If the tokens are currently in use, they will fail to authenticate / authorize after they are deleted.

Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.

" + }, + "DeleteWorkspaceServiceAccountToken":{ + "name":"DeleteWorkspaceServiceAccountToken", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens/{tokenId}", + "responseCode":200 + }, + "input":{"shape":"DeleteWorkspaceServiceAccountTokenRequest"}, + "output":{"shape":"DeleteWorkspaceServiceAccountTokenResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a token for the workspace service account.

This will disable the key associated with the token. If any automation is currently using the key, it will no longer be authenticated or authorized to perform actions with the Grafana HTTP APIs.

Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.

" }, "DescribeWorkspace":{ "name":"DescribeWorkspace", @@ -139,6 +218,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} @@ -234,6 +314,44 @@ ], "documentation":"

Lists available versions of Grafana. These are available when calling CreateWorkspace. Optionally, include a workspace to list the versions to which it can be upgraded.

" }, + "ListWorkspaceServiceAccountTokens":{ + "name":"ListWorkspaceServiceAccountTokens", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens", + "responseCode":200 + }, + "input":{"shape":"ListWorkspaceServiceAccountTokensRequest"}, + "output":{"shape":"ListWorkspaceServiceAccountTokensResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of tokens for a workspace service account.

This does not return the key for each token. You cannot access keys after they are created. To create a new key, delete the token and recreate it.

Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.

" + }, + "ListWorkspaceServiceAccounts":{ + "name":"ListWorkspaceServiceAccounts", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/serviceaccounts", + "responseCode":200 + }, + "input":{"shape":"ListWorkspaceServiceAccountsRequest"}, + "output":{"shape":"ListWorkspaceServiceAccountsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of service accounts for a workspace.

Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.

" + }, "ListWorkspaces":{ "name":"ListWorkspaces", "http":{ @@ -446,7 +564,7 @@ "members":{ "grafanaToken":{ "shape":"GrafanaToken", - "documentation":"

A token from Grafana Labs that ties your Amazon Web Services account with a Grafana Labs account. For more information, see Register with Grafana Labs.

", + "documentation":"

A token from Grafana Labs that ties your Amazon Web Services account with a Grafana Labs account. For more information, see Link your account with Grafana Labs.

", "location":"header", "locationName":"Grafana-Token" }, @@ -580,7 +698,7 @@ }, "keyRole":{ "shape":"String", - "documentation":"

Specifies the permission level of the key.

Valid values: VIEWER|EDITOR|ADMIN

" + "documentation":"

Specifies the permission level of the key.

Valid values: ADMIN|EDITOR|VIEWER

" }, "secondsToLive":{ "shape":"CreateWorkspaceApiKeyRequestSecondsToLiveInteger", @@ -650,7 +768,7 @@ }, "grafanaVersion":{ "shape":"GrafanaVersion", - "documentation":"

Specifies the version of Grafana to support in the new workspace. If not specified, defaults to the latest version (for example, 9.4).

To get a list of supported versions, use the ListVersions operation.

" + "documentation":"

Specifies the version of Grafana to support in the new workspace. If not specified, defaults to the latest version (for example, 10.4).

To get a list of supported versions, use the ListVersions operation.

" }, "networkAccessControl":{ "shape":"NetworkAccessConfiguration", @@ -712,6 +830,116 @@ } } }, + "CreateWorkspaceServiceAccountRequest":{ + "type":"structure", + "required":[ + "grafanaRole", + "name", + "workspaceId" + ], + "members":{ + "grafanaRole":{ + "shape":"Role", + "documentation":"

The permission level to use for this service account.

For more information about the roles and the permissions each has, see User roles in the Amazon Managed Grafana User Guide.

" + }, + "name":{ + "shape":"ServiceAccountName", + "documentation":"

A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace within which to create the service account.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "CreateWorkspaceServiceAccountResponse":{ + "type":"structure", + "required":[ + "grafanaRole", + "id", + "name", + "workspaceId" + ], + "members":{ + "grafanaRole":{ + "shape":"Role", + "documentation":"

The permission level given to the service account.

" + }, + "id":{ + "shape":"String", + "documentation":"

The ID of the service account.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the service account.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The workspace with which the service account is associated.

" + } + } + }, + "CreateWorkspaceServiceAccountTokenRequest":{ + "type":"structure", + "required":[ + "name", + "secondsToLive", + "serviceAccountId", + "workspaceId" + ], + "members":{ + "name":{ + "shape":"ServiceAccountTokenName", + "documentation":"

A name for the token to create.

" + }, + "secondsToLive":{ + "shape":"CreateWorkspaceServiceAccountTokenRequestSecondsToLiveInteger", + "documentation":"

Sets how long the token will be valid, in seconds. You can set the time up to 30 days in the future.

" + }, + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account for which to create a token.

", + "location":"uri", + "locationName":"serviceAccountId" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace the service account resides within.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "CreateWorkspaceServiceAccountTokenRequestSecondsToLiveInteger":{ + "type":"integer", + "box":true, + "max":2592000, + "min":1 + }, + "CreateWorkspaceServiceAccountTokenResponse":{ + "type":"structure", + "required":[ + "serviceAccountId", + "serviceAccountToken", + "workspaceId" + ], + "members":{ + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account where the token was created.

" + }, + "serviceAccountToken":{ + "shape":"ServiceAccountTokenSummaryWithKey", + "documentation":"

Information about the created token, including the key. Be sure to store the key securely.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where the token was created.

" + } + } + }, "DataSourceType":{ "type":"string", "enum":[ @@ -790,6 +1018,94 @@ } } }, + "DeleteWorkspaceServiceAccountRequest":{ + "type":"structure", + "required":[ + "serviceAccountId", + "workspaceId" + ], + "members":{ + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account to delete.

", + "location":"uri", + "locationName":"serviceAccountId" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where the service account resides.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "DeleteWorkspaceServiceAccountResponse":{ + "type":"structure", + "required":[ + "serviceAccountId", + "workspaceId" + ], + "members":{ + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account deleted.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where the service account was deleted.

" + } + } + }, + "DeleteWorkspaceServiceAccountTokenRequest":{ + "type":"structure", + "required":[ + "serviceAccountId", + "tokenId", + "workspaceId" + ], + "members":{ + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account from which to delete the token.

", + "location":"uri", + "locationName":"serviceAccountId" + }, + "tokenId":{ + "shape":"String", + "documentation":"

The ID of the token to delete.

", + "location":"uri", + "locationName":"tokenId" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace from which to delete the token.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "DeleteWorkspaceServiceAccountTokenResponse":{ + "type":"structure", + "required":[ + "serviceAccountId", + "tokenId", + "workspaceId" + ], + "members":{ + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account where the token was deleted.

" + }, + "tokenId":{ + "shape":"String", + "documentation":"

The ID of the token that was deleted.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where the token was deleted.

" + } + } + }, "DescribeWorkspaceAuthenticationRequest":{ "type":"structure", "required":["workspaceId"], @@ -1100,6 +1416,122 @@ } } }, + "ListWorkspaceServiceAccountTokensRequest":{ + "type":"structure", + "required":[ + "serviceAccountId", + "workspaceId" + ], + "members":{ + "maxResults":{ + "shape":"ListWorkspaceServiceAccountTokensRequestMaxResultsInteger", + "documentation":"

The maximum number of tokens to include in the results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of service accounts to return. (You receive this token from a previous ListWorkspaceServiceAccountTokens operation.)

", + "location":"querystring", + "locationName":"nextToken" + }, + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account for which to return tokens.

", + "location":"uri", + "locationName":"serviceAccountId" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace for which to return tokens.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "ListWorkspaceServiceAccountTokensRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListWorkspaceServiceAccountTokensResponse":{ + "type":"structure", + "required":[ + "serviceAccountId", + "serviceAccountTokens", + "workspaceId" + ], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to use when requesting the next set of service accounts.

" + }, + "serviceAccountId":{ + "shape":"String", + "documentation":"

The ID of the service account where the tokens reside.

" + }, + "serviceAccountTokens":{ + "shape":"ServiceAccountTokenList", + "documentation":"

An array of structures containing information about the tokens.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where the tokens reside.

" + } + } + }, + "ListWorkspaceServiceAccountsRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "maxResults":{ + "shape":"ListWorkspaceServiceAccountsRequestMaxResultsInteger", + "documentation":"

The maximum number of service accounts to include in the results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of service accounts to return. (You receive this token from a previous ListWorkspaceServiceAccounts operation.)

", + "location":"querystring", + "locationName":"nextToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The workspace for which to list service accounts.

", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "ListWorkspaceServiceAccountsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListWorkspaceServiceAccountsResponse":{ + "type":"structure", + "required":[ + "serviceAccounts", + "workspaceId" + ], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to use when requesting the next set of service accounts.

" + }, + "serviceAccounts":{ + "shape":"ServiceAccountList", + "documentation":"

An array of structures containing information about the service accounts.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The workspace to which the service accounts are associated.

" + } + } + }, "ListWorkspacesRequest":{ "type":"structure", "members":{ @@ -1341,6 +1773,111 @@ "max":5, "min":1 }, + "ServiceAccountList":{ + "type":"list", + "member":{"shape":"ServiceAccountSummary"} + }, + "ServiceAccountName":{ + "type":"string", + "max":128, + "min":1 + }, + "ServiceAccountSummary":{ + "type":"structure", + "required":[ + "grafanaRole", + "id", + "isDisabled", + "name" + ], + "members":{ + "grafanaRole":{ + "shape":"Role", + "documentation":"

The role of the service account, which sets the permission level used when calling Grafana APIs.

" + }, + "id":{ + "shape":"String", + "documentation":"

The unique ID of the service account.

" + }, + "isDisabled":{ + "shape":"String", + "documentation":"

Returns true if the service account is disabled. Service accounts can be disabled and enabled in the Amazon Managed Grafana console.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the service account.

" + } + }, + "documentation":"

A structure that contains the information about one service account.

" + }, + "ServiceAccountTokenKey":{ + "type":"string", + "sensitive":true + }, + "ServiceAccountTokenList":{ + "type":"list", + "member":{"shape":"ServiceAccountTokenSummary"} + }, + "ServiceAccountTokenName":{ + "type":"string", + "max":128, + "min":1 + }, + "ServiceAccountTokenSummary":{ + "type":"structure", + "required":[ + "createdAt", + "expiresAt", + "id", + "name" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

When the service account token was created.

" + }, + "expiresAt":{ + "shape":"Timestamp", + "documentation":"

When the service account token will expire.

" + }, + "id":{ + "shape":"String", + "documentation":"

The unique ID of the service account token.

" + }, + "lastUsedAt":{ + "shape":"Timestamp", + "documentation":"

The last time the token was used to authorize a Grafana HTTP API.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the service account token.

" + } + }, + "documentation":"

A structure that contains the information about a service account token.

" + }, + "ServiceAccountTokenSummaryWithKey":{ + "type":"structure", + "required":[ + "id", + "key", + "name" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"

The unique ID of the service account token.

" + }, + "key":{ + "shape":"ServiceAccountTokenKey", + "documentation":"

The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the service account token.

" + } + }, + "documentation":"

A structure that contains the information about a service account token.

This structure is returned when creating the token. It is important to store the key that is returned, as it is not retrievable at a later time.

If you lose the key, you can delete and recreate the token, which will create a new key.

" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -1899,7 +2436,7 @@ }, "grafanaToken":{ "shape":"GrafanaToken", - "documentation":"

The token that ties this workspace to a Grafana Labs account. For more information, see Register with Grafana Labs.

" + "documentation":"

The token that ties this workspace to a Grafana Labs account. For more information, see Link your account with Grafana Labs.

" }, "grafanaVersion":{ "shape":"GrafanaVersion", @@ -2029,7 +2566,7 @@ }, "grafanaToken":{ "shape":"GrafanaToken", - "documentation":"

The token that ties this workspace to a Grafana Labs account. For more information, see Register with Grafana Labs.

" + "documentation":"

The token that ties this workspace to a Grafana Labs account. For more information, see Link your account with Grafana Labs.

" }, "grafanaVersion":{ "shape":"GrafanaVersion", diff --git a/botocore/data/groundstation/2019-05-23/service-2.json b/botocore/data/groundstation/2019-05-23/service-2.json index 409e09423d..1dcc0df61e 100644 --- a/botocore/data/groundstation/2019-05-23/service-2.json +++ b/botocore/data/groundstation/2019-05-23/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"groundstation", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Ground Station", "serviceId":"GroundStation", "signatureVersion":"v4", "signingName":"groundstation", - "uid":"groundstation-2019-05-23" + "uid":"groundstation-2019-05-23", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelContact":{ @@ -2676,7 +2678,7 @@ "documentation":"

Identifies the S3 object to be used as the ephemeris.

" } }, - "documentation":"

Ephemeris data in Orbit Ephemeris Message (OEM) format.

" + "documentation":"

Ephemeris data in Orbit Ephemeris Message (OEM) format.

AWS Ground Station processes OEM Customer Provided Ephemerides according to the CCSDS standard with some extra restrictions. OEM files should be in KVN format. For more detail about the OEM format that AWS Ground Station supports, see OEM ephemeris format in the AWS Ground Station user guide.

" }, "PaginationMaxResults":{ "type":"integer", diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index bb4e9914ac..8100522d2c 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"guardduty", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon GuardDuty", "serviceId":"GuardDuty", "signatureVersion":"v4", "signingName":"guardduty", - "uid":"guardduty-2017-11-28" + "uid":"guardduty-2017-11-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptAdministratorInvitation":{ @@ -104,6 +106,23 @@ ], "documentation":"

Creates a new IPSet, which is called a trusted IP list in the console user interface. An IPSet is a list of IP addresses that are trusted for secure communication with Amazon Web Services infrastructure and applications. GuardDuty doesn't generate findings for IP addresses that are included in IPSets. Only users from the administrator account can use this operation.

" }, + "CreateMalwareProtectionPlan":{ + "name":"CreateMalwareProtectionPlan", + "http":{ + "method":"POST", + "requestUri":"/malware-protection-plan", + "responseCode":200 + }, + "input":{"shape":"CreateMalwareProtectionPlanRequest"}, + "output":{"shape":"CreateMalwareProtectionPlanResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Creates a new Malware Protection plan for the protected resource.

When you create a Malware Protection plan, the Amazon Web Services service terms for GuardDuty Malware Protection apply. For more information, see Amazon Web Services service terms for GuardDuty Malware Protection.

" + }, "CreateMembers":{ "name":"CreateMembers", "http":{ @@ -239,6 +258,22 @@ ], "documentation":"

Deletes invitations sent to the current member account by Amazon Web Services accounts specified by their account IDs.

" }, + "DeleteMalwareProtectionPlan":{ + "name":"DeleteMalwareProtectionPlan", + "http":{ + "method":"DELETE", + "requestUri":"/malware-protection-plan/{malwareProtectionPlanId}", + "responseCode":200 + }, + "input":{"shape":"DeleteMalwareProtectionPlanRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the Malware Protection plan ID associated with the Malware Protection plan resource. Use this API only when you no longer want to protect the resource associated with this Malware Protection plan ID.

" + }, "DeleteMembers":{ "name":"DeleteMembers", "http":{ @@ -526,6 +561,23 @@ ], "documentation":"

Returns the count of all GuardDuty membership invitations that were sent to the current member account except the currently accepted invitation.

" }, + "GetMalwareProtectionPlan":{ + "name":"GetMalwareProtectionPlan", + "http":{ + "method":"GET", + "requestUri":"/malware-protection-plan/{malwareProtectionPlanId}", + "responseCode":200 + }, + "input":{"shape":"GetMalwareProtectionPlanRequest"}, + "output":{"shape":"GetMalwareProtectionPlanResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the Malware Protection plan details associated with a Malware Protection plan ID.

" + }, "GetMalwareScanSettings":{ "name":"GetMalwareScanSettings", "http":{ @@ -752,6 +804,22 @@ ], "documentation":"

Lists all GuardDuty membership invitations that were sent to the current Amazon Web Services account.

" }, + "ListMalwareProtectionPlans":{ + "name":"ListMalwareProtectionPlans", + "http":{ + "method":"GET", + "requestUri":"/malware-protection-plan", + "responseCode":200 + }, + "input":{"shape":"ListMalwareProtectionPlansRequest"}, + "output":{"shape":"ListMalwareProtectionPlansResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Lists the Malware Protection plan IDs associated with the protected resources in your Amazon Web Services account.

" + }, "ListMembers":{ "name":"ListMembers", "http":{ @@ -981,6 +1049,22 @@ ], "documentation":"

Updates the IPSet specified by the IPSet ID.

" }, + "UpdateMalwareProtectionPlan":{ + "name":"UpdateMalwareProtectionPlan", + "http":{ + "method":"PATCH", + "requestUri":"/malware-protection-plan/{malwareProtectionPlanId}", + "responseCode":200 + }, + "input":{"shape":"UpdateMalwareProtectionPlanRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Updates an existing Malware Protection plan resource.

" + }, "UpdateMalwareScanSettings":{ "name":"UpdateMalwareScanSettings", "http":{ @@ -2312,6 +2396,51 @@ } } }, + "CreateMalwareProtectionPlanRequest":{ + "type":"structure", + "required":[ + "Role", + "ProtectedResource" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

The idempotency token for the create request.

", + "idempotencyToken":true, + "locationName":"clientToken" + }, + "Role":{ + "shape":"String", + "documentation":"

IAM role with permissions required to scan and add tags to the associated protected resource.

", + "locationName":"role" + }, + "ProtectedResource":{ + "shape":"CreateProtectedResource", + "documentation":"

Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource.

", + "locationName":"protectedResource" + }, + "Actions":{ + "shape":"MalwareProtectionPlanActions", + "documentation":"

Information about whether the tags will be added to the S3 object after scanning.

", + "locationName":"actions" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Tags added to the Malware Protection plan resource.

", + "locationName":"tags" + } + } + }, + "CreateMalwareProtectionPlanResponse":{ + "type":"structure", + "members":{ + "MalwareProtectionPlanId":{ + "shape":"String", + "documentation":"

A unique identifier associated with the Malware Protection plan resource.

", + "locationName":"malwareProtectionPlanId" + } + } + }, "CreateMembersRequest":{ "type":"structure", "required":[ @@ -2343,6 +2472,17 @@ } } }, + "CreateProtectedResource":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"CreateS3BucketResource", + "documentation":"

Information about the protected S3 bucket resource.

", + "locationName":"s3Bucket" + } + }, + "documentation":"

Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource.

" + }, "CreatePublishingDestinationRequest":{ "type":"structure", "required":[ @@ -2386,6 +2526,22 @@ } } }, + "CreateS3BucketResource":{ + "type":"structure", + "members":{ + "BucketName":{ + "shape":"String", + "documentation":"

Name of the S3 bucket.

", + "locationName":"bucketName" + }, + "ObjectPrefixes":{ + "shape":"MalwareProtectionPlanObjectPrefixesList", + "documentation":"

Information about the specified object prefixes. The S3 object will be scanned only if it belongs to any of the specified object prefixes.

", + "locationName":"objectPrefixes" + } + }, + "documentation":"

Information about the protected S3 bucket resource.

" + }, "CreateSampleFindingsRequest":{ "type":"structure", "required":["DetectorId"], @@ -2760,6 +2916,18 @@ } } }, + "DeleteMalwareProtectionPlanRequest":{ + "type":"structure", + "required":["MalwareProtectionPlanId"], + "members":{ + "MalwareProtectionPlanId":{ + "shape":"String", + "documentation":"

A unique identifier associated with the Malware Protection plan resource.

", + "location":"uri", + "locationName":"malwareProtectionPlanId" + } + } + }, "DeleteMembersRequest":{ "type":"structure", "required":[ @@ -4284,6 +4452,63 @@ } } }, + "GetMalwareProtectionPlanRequest":{ + "type":"structure", + "required":["MalwareProtectionPlanId"], + "members":{ + "MalwareProtectionPlanId":{ + "shape":"String", + "documentation":"

A unique identifier associated with the Malware Protection plan resource.

", + "location":"uri", + "locationName":"malwareProtectionPlanId" + } + } + }, + "GetMalwareProtectionPlanResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

Amazon Resource Name (ARN) of the protected resource.

", + "locationName":"arn" + }, + "Role":{ + "shape":"String", + "documentation":"

IAM role that includes the permissions required to scan and add tags to the associated protected resource.

", + "locationName":"role" + }, + "ProtectedResource":{ + "shape":"CreateProtectedResource", + "documentation":"

Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource.

", + "locationName":"protectedResource" + }, + "Actions":{ + "shape":"MalwareProtectionPlanActions", + "documentation":"

Information about whether the tags will be added to the S3 object after scanning.

", + "locationName":"actions" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the Malware Protection plan resource was created.

", + "locationName":"createdAt" + }, + "Status":{ + "shape":"MalwareProtectionPlanStatus", + "documentation":"

Malware Protection plan status.

", + "locationName":"status" + }, + "StatusReasons":{ + "shape":"MalwareProtectionPlanStatusReasonsList", + "documentation":"

Information about the issue code and message associated to the status of your Malware Protection plan.

", + "locationName":"statusReasons" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Tags added to the Malware Protection plan resource.

", + "locationName":"tags" + } + } + }, "GetMalwareScanSettingsRequest":{ "type":"structure", "required":["DetectorId"], @@ -4858,6 +5083,26 @@ "max":50, "min":0 }, + "ItemPath":{ + "type":"structure", + "members":{ + "NestedItemPath":{ + "shape":"String", + "documentation":"

The nested item path where the infected file was found.

", + "locationName":"nestedItemPath" + }, + "Hash":{ + "shape":"String", + "documentation":"

The hash value of the infected resource.

", + "locationName":"hash" + } + }, + "documentation":"

Information about the nested item path and hash of the protected resource.

" + }, + "ItemPaths":{ + "type":"list", + "member":{"shape":"ItemPath"} + }, "KubernetesApiCallAction":{ "type":"structure", "members":{ @@ -5505,6 +5750,32 @@ } } }, + "ListMalwareProtectionPlansRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListMalwareProtectionPlansResponse":{ + "type":"structure", + "members":{ + "MalwareProtectionPlans":{ + "shape":"MalwareProtectionPlansSummary", + "documentation":"

A list of unique identifiers associated with each Malware Protection plan.

", + "locationName":"malwareProtectionPlans" + }, + "NextToken":{ + "shape":"String", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "locationName":"nextToken" + } + } + }, "ListMembersRequest":{ "type":"structure", "required":["DetectorId"], @@ -5791,6 +6062,97 @@ }, "documentation":"

Provides details about Malware Protection when it is enabled as a data source.

" }, + "MalwareProtectionPlanActions":{ + "type":"structure", + "members":{ + "Tagging":{ + "shape":"MalwareProtectionPlanTaggingAction", + "documentation":"

Indicates whether the scanned S3 object will have tags about the scan result.

", + "locationName":"tagging" + } + }, + "documentation":"

Information about whether the tags will be added to the S3 object after scanning.

" + }, + "MalwareProtectionPlanObjectPrefixesList":{ + "type":"list", + "member":{"shape":"String"}, + "max":5, + "min":0 + }, + "MalwareProtectionPlanStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "WARNING", + "ERROR" + ] + }, + "MalwareProtectionPlanStatusReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "documentation":"

Issue code.

", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "documentation":"

Issue message that specifies the reason. For information about potential troubleshooting steps, see Troubleshooting Malware Protection for S3 status issues in the GuardDuty User Guide.

", + "locationName":"message" + } + }, + "documentation":"

Information about the issue code and message associated to the status of your Malware Protection plan.

" + }, + "MalwareProtectionPlanStatusReasonsList":{ + "type":"list", + "member":{"shape":"MalwareProtectionPlanStatusReason"}, + "max":50, + "min":0 + }, + "MalwareProtectionPlanSummary":{ + "type":"structure", + "members":{ + "MalwareProtectionPlanId":{ + "shape":"String", + "documentation":"

A unique identifier associated with the Malware Protection plan.

", + "locationName":"malwareProtectionPlanId" + } + }, + "documentation":"

Information about the Malware Protection plan resource.

" + }, + "MalwareProtectionPlanTaggingAction":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"MalwareProtectionPlanTaggingActionStatus", + "documentation":"

Indicates whether or not the tags will be added.

", + "locationName":"status" + } + }, + "documentation":"

Information about adding tags to the scanned S3 object after the scan result.

" + }, + "MalwareProtectionPlanTaggingActionStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "MalwareProtectionPlansSummary":{ + "type":"list", + "member":{"shape":"MalwareProtectionPlanSummary"} + }, + "MalwareScanDetails":{ + "type":"structure", + "members":{ + "Threats":{ + "shape":"Threats", + "documentation":"

Information about the detected threats associated with the generated GuardDuty finding.

", + "locationName":"threats" + } + }, + "documentation":"

Information about the malware scan that generated a GuardDuty finding.

" + }, "ManagementType":{ "type":"string", "enum":[ @@ -7031,7 +7393,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

InstanceArn that was scanned in the scan entry.

", + "documentation":"

Instance ARN that was scanned in the scan entry.

", "locationName":"instanceArn" } }, @@ -7041,6 +7403,24 @@ "type":"list", "member":{"shape":"String"} }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The error message.

", + "locationName":"message" + }, + "Type":{ + "shape":"String", + "documentation":"

The error type.

", + "locationName":"__type" + } + }, + "documentation":"

The requested resource can't be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, "ResourceType":{ "type":"string", "enum":[ @@ -7238,6 +7618,11 @@ "shape":"PublicAccess", "documentation":"

Describes the public access policies that apply to the S3 bucket.

", "locationName":"publicAccess" + }, + "S3ObjectDetails":{ + "shape":"S3ObjectDetails", + "documentation":"

Information about the S3 object that was scanned.

", + "locationName":"s3ObjectDetails" } }, "documentation":"

Contains information on the S3 bucket.

" @@ -7270,6 +7655,41 @@ }, "documentation":"

Describes whether S3 data event logs will be enabled as a data source.

" }, + "S3ObjectDetail":{ + "type":"structure", + "members":{ + "ObjectArn":{ + "shape":"String", + "documentation":"

Amazon Resource Name (ARN) of the S3 object.

", + "locationName":"objectArn" + }, + "Key":{ + "shape":"String", + "documentation":"

Key of the S3 object.

", + "locationName":"key" + }, + "ETag":{ + "shape":"String", + "documentation":"

The entity tag is a hash of the S3 object. The ETag reflects changes only to the contents of an object, and not its metadata.

", + "locationName":"eTag" + }, + "Hash":{ + "shape":"String", + "documentation":"

Hash of the threat detected in this finding.

", + "locationName":"hash" + }, + "VersionId":{ + "shape":"String", + "documentation":"

Version ID of the object.

", + "locationName":"versionId" + } + }, + "documentation":"

Information about the S3 object that was scanned.

" + }, + "S3ObjectDetails":{ + "type":"list", + "member":{"shape":"S3ObjectDetail"} + }, "Scan":{ "type":"structure", "members":{ @@ -7449,7 +7869,7 @@ }, "VolumeArn":{ "shape":"String", - "documentation":"

EBS volume Arn details of the infected file.

", + "documentation":"

EBS volume ARN details of the infected file.

", "locationName":"volumeArn" }, "Hash":{ @@ -7687,6 +8107,11 @@ "shape":"Detection", "documentation":"

Contains information about the detected unusual behavior.

", "locationName":"detection" + }, + "MalwareScanDetails":{ + "shape":"MalwareScanDetails", + "documentation":"

Returns details from the malware scan that generated a GuardDuty finding.

", + "locationName":"malwareScanDetails" } }, "documentation":"

Contains additional information about the generated finding.

" @@ -7891,6 +8316,27 @@ "type":"list", "member":{"shape":"Tag"} }, + "Threat":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Name of the detected threat that caused GuardDuty to generate this finding.

", + "locationName":"name" + }, + "Source":{ + "shape":"String", + "documentation":"

Source of the threat that generated this finding.

", + "locationName":"source" + }, + "ItemPaths":{ + "shape":"ItemPaths", + "documentation":"

Information about the nested item path and hash of the protected resource.

", + "locationName":"itemPaths" + } + }, + "documentation":"

Information about the detected threats associated with the generated finding.

" + }, "ThreatDetectedByName":{ "type":"structure", "members":{ @@ -7979,6 +8425,10 @@ "type":"list", "member":{"shape":"String"} }, + "Threats":{ + "type":"list", + "member":{"shape":"Threat"} + }, "ThreatsDetectedItemCount":{ "type":"structure", "members":{ @@ -8278,6 +8728,33 @@ "members":{ } }, + "UpdateMalwareProtectionPlanRequest":{ + "type":"structure", + "required":["MalwareProtectionPlanId"], + "members":{ + "MalwareProtectionPlanId":{ + "shape":"String", + "documentation":"

A unique identifier associated with the Malware Protection plan.

", + "location":"uri", + "locationName":"malwareProtectionPlanId" + }, + "Role":{ + "shape":"String", + "documentation":"

IAM role with permissions required to scan and add tags to the associated protected resource.

", + "locationName":"role" + }, + "Actions":{ + "shape":"MalwareProtectionPlanActions", + "documentation":"

Information about whether the tags will be added to the S3 object after scanning.

", + "locationName":"actions" + }, + "ProtectedResource":{ + "shape":"UpdateProtectedResource", + "documentation":"

Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource.

", + "locationName":"protectedResource" + } + } + }, "UpdateMalwareScanSettingsRequest":{ "type":"structure", "required":["DetectorId"], @@ -8389,6 +8866,17 @@ "members":{ } }, + "UpdateProtectedResource":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"UpdateS3BucketResource", + "documentation":"

Information about the protected S3 bucket resource.

", + "locationName":"s3Bucket" + } + }, + "documentation":"

Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource.

" + }, "UpdatePublishingDestinationRequest":{ "type":"structure", "required":[ @@ -8420,6 +8908,17 @@ "members":{ } }, + "UpdateS3BucketResource":{ + "type":"structure", + "members":{ + "ObjectPrefixes":{ + "shape":"MalwareProtectionPlanObjectPrefixesList", + "documentation":"

Information about the specified object prefixes. The S3 object will be scanned only if it belongs to any of the specified object prefixes.

", + "locationName":"objectPrefixes" + } + }, + "documentation":"

Information about the protected S3 bucket resource.

" + }, "UpdateThreatIntelSetRequest":{ "type":"structure", "required":[ @@ -8697,7 +9196,7 @@ "members":{ "VolumeArn":{ "shape":"String", - "documentation":"

EBS volume Arn information.

", + "documentation":"

EBS volume ARN information.

", "locationName":"volumeArn" }, "VolumeType":{ @@ -8722,12 +9221,12 @@ }, "SnapshotArn":{ "shape":"String", - "documentation":"

Snapshot Arn of the EBS volume.

", + "documentation":"

Snapshot ARN of the EBS volume.

", "locationName":"snapshotArn" }, "KmsKeyArn":{ "shape":"String", - "documentation":"

KMS key Arn used to encrypt the EBS volume.

", + "documentation":"

KMS key ARN used to encrypt the EBS volume.

", "locationName":"kmsKeyArn" } }, diff --git a/botocore/data/honeycode/2020-03-01/endpoint-rule-set-1.json b/botocore/data/honeycode/2020-03-01/endpoint-rule-set-1.json deleted file mode 100644 index e485bc8046..0000000000 --- a/botocore/data/honeycode/2020-03-01/endpoint-rule-set-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } 
- ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://honeycode.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of file diff --git a/botocore/data/honeycode/2020-03-01/examples-1.json b/botocore/data/honeycode/2020-03-01/examples-1.json deleted file mode 100644 index 0ea7e3b0bb..0000000000 --- a/botocore/data/honeycode/2020-03-01/examples-1.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": "1.0", - "examples": { - } -} diff --git a/botocore/data/honeycode/2020-03-01/paginators-1.json b/botocore/data/honeycode/2020-03-01/paginators-1.json deleted file mode 100644 index 19ba884c5e..0000000000 --- a/botocore/data/honeycode/2020-03-01/paginators-1.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "pagination": { - "ListTableColumns": { - "input_token": "nextToken", - "output_token": "nextToken", - "result_key": "tableColumns" - }, - "ListTableRows": { - "input_token": "nextToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "rows" - }, - "ListTables": { - "input_token": "nextToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "tables" - }, - "QueryTableRows": { - "input_token": "nextToken", - "output_token": 
"nextToken", - "limit_key": "maxResults", - "result_key": "rows" - } - } -} diff --git a/botocore/data/honeycode/2020-03-01/paginators-1.sdk-extras.json b/botocore/data/honeycode/2020-03-01/paginators-1.sdk-extras.json deleted file mode 100644 index bcdc190050..0000000000 --- a/botocore/data/honeycode/2020-03-01/paginators-1.sdk-extras.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": 1.0, - "merge": { - "pagination": { - "ListTableColumns": { - "non_aggregate_keys": [ - "workbookCursor" - ] - }, - "ListTableRows": { - "non_aggregate_keys": [ - "workbookCursor", - "columnIds", - "rowIdsNotFound" - ] - }, - "ListTables": { - "non_aggregate_keys": [ - "workbookCursor" - ] - }, - "QueryTableRows": { - "non_aggregate_keys": [ - "workbookCursor", - "columnIds" - ] - } - } - } -} \ No newline at end of file diff --git a/botocore/data/honeycode/2020-03-01/service-2.json b/botocore/data/honeycode/2020-03-01/service-2.json deleted file mode 100644 index 74c726cfe4..0000000000 --- a/botocore/data/honeycode/2020-03-01/service-2.json +++ /dev/null @@ -1,1833 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2020-03-01", - "endpointPrefix":"honeycode", - "jsonVersion":"1.1", - "protocol":"rest-json", - "serviceAbbreviation":"Honeycode", - "serviceFullName":"Amazon Honeycode", - "serviceId":"Honeycode", - "signatureVersion":"v4", - "signingName":"honeycode", - "uid":"honeycode-2020-03-01" - }, - "operations":{ - "BatchCreateTableRows":{ - "name":"BatchCreateTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchcreate" - }, - "input":{"shape":"BatchCreateTableRowsRequest"}, - "output":{"shape":"BatchCreateTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceQuotaExceededException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - 
{"shape":"ValidationException"} - ], - "documentation":"

The BatchCreateTableRows API allows you to create one or more rows at the end of a table in a workbook. The API allows you to specify the values to set in some or all of the columns in the new rows.

If a column is not explicitly set in a specific row, then the column level formula specified in the table will be applied to the new row. If there is no column level formula but the last row of the table has a formula, then that formula will be copied down to the new row. If there is no column level formula and no formula in the last row of the table, then that column will be left blank for the new rows.

" - }, - "BatchDeleteTableRows":{ - "name":"BatchDeleteTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchdelete" - }, - "input":{"shape":"BatchDeleteTableRowsRequest"}, - "output":{"shape":"BatchDeleteTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ValidationException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

The BatchDeleteTableRows API allows you to delete one or more rows from a table in a workbook. You need to specify the ids of the rows that you want to delete from the table.

" - }, - "BatchUpdateTableRows":{ - "name":"BatchUpdateTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupdate" - }, - "input":{"shape":"BatchUpdateTableRowsRequest"}, - "output":{"shape":"BatchUpdateTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ValidationException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook.

You can specify the values to set in some or all of the columns in the table for the specified rows. If a column is not explicitly specified in a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").

" - }, - "BatchUpsertTableRows":{ - "name":"BatchUpsertTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupsert" - }, - "input":{"shape":"BatchUpsertTableRowsRequest"}, - "output":{"shape":"BatchUpsertTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceQuotaExceededException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert operation takes a filter expression as input and evaluates it to find matching rows on the destination table. If matching rows are found, it will update the cells in the matching rows to new values specified in the request. If no matching rows are found, a new row is added at the end of the table and the cells in that row are set to the new values specified in the request.

You can specify the values to set in some or all of the columns in the table for the matching or newly appended rows. If a column is not explicitly specified for a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").

" - }, - "DescribeTableDataImportJob":{ - "name":"DescribeTableDataImportJob", - "http":{ - "method":"GET", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import/{jobId}" - }, - "input":{"shape":"DescribeTableDataImportJobRequest"}, - "output":{"shape":"DescribeTableDataImportJobResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"RequestTimeoutException"} - ], - "documentation":"

The DescribeTableDataImportJob API allows you to retrieve the status and details of a table data import job.

" - }, - "GetScreenData":{ - "name":"GetScreenData", - "http":{ - "method":"POST", - "requestUri":"/screendata" - }, - "input":{"shape":"GetScreenDataRequest"}, - "output":{"shape":"GetScreenDataResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The GetScreenData API allows retrieval of data from a screen in a Honeycode app. The API allows setting local variables in the screen to filter, sort or otherwise affect what will be displayed on the screen.

" - }, - "InvokeScreenAutomation":{ - "name":"InvokeScreenAutomation", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/apps/{appId}/screens/{screenId}/automations/{automationId}" - }, - "input":{"shape":"InvokeScreenAutomationRequest"}, - "output":{"shape":"InvokeScreenAutomationResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"AutomationExecutionException"}, - {"shape":"AutomationExecutionTimeoutException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ServiceQuotaExceededException"} - ], - "documentation":"

The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app. The API allows setting local variables, which can then be used in the automation being invoked. This allows automating the Honeycode app interactions to write, update or delete data in the workbook.

" - }, - "ListTableColumns":{ - "name":"ListTableColumns", - "http":{ - "method":"GET", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/columns" - }, - "input":{"shape":"ListTableColumnsRequest"}, - "output":{"shape":"ListTableColumnsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook.

" - }, - "ListTableRows":{ - "name":"ListTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/list" - }, - "input":{"shape":"ListTableRowsRequest"}, - "output":{"shape":"ListTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ValidationException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook.

" - }, - "ListTables":{ - "name":"ListTables", - "http":{ - "method":"GET", - "requestUri":"/workbooks/{workbookId}/tables" - }, - "input":{"shape":"ListTablesRequest"}, - "output":{"shape":"ListTablesResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The ListTables API allows you to retrieve a list of all the tables in a workbook.

" - }, - "ListTagsForResource":{ - "name":"ListTagsForResource", - "http":{ - "method":"GET", - "requestUri":"/tags/{resourceArn}" - }, - "input":{"shape":"ListTagsForResourceRequest"}, - "output":{"shape":"ListTagsForResourceResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The ListTagsForResource API allows you to return a resource's tags.

" - }, - "QueryTableRows":{ - "name":"QueryTableRows", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/query" - }, - "input":{"shape":"QueryTableRowsRequest"}, - "output":{"shape":"QueryTableRowsResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The QueryTableRows API allows you to use a filter formula to query for specific rows in a table.

" - }, - "StartTableDataImportJob":{ - "name":"StartTableDataImportJob", - "http":{ - "method":"POST", - "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import" - }, - "input":{"shape":"StartTableDataImportJobRequest"}, - "output":{"shape":"StartTableDataImportJobResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ServiceQuotaExceededException"} - ], - "documentation":"

The StartTableDataImportJob API allows you to start an import job on a table. This API will only return the id of the job that was started. To find out the status of the import request, you need to call the DescribeTableDataImportJob API.

" - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/tags/{resourceArn}" - }, - "input":{"shape":"TagResourceRequest"}, - "output":{"shape":"TagResourceResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The TagResource API allows you to add tags to an ARN-able resource. Resource includes workbook, table, screen and screen-automation.

" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/tags/{resourceArn}" - }, - "input":{"shape":"UntagResourceRequest"}, - "output":{"shape":"UntagResourceResult"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"RequestTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} - ], - "documentation":"

The UntagResource API allows you to removes tags from an ARN-able resource. Resource includes workbook, table, screen and screen-automation.

" - } - }, - "shapes":{ - "AccessDeniedException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the resource in the request.

", - "error":{"httpStatusCode":403}, - "exception":true - }, - "AutomationExecutionException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The automation execution did not end successfully.

", - "error":{"httpStatusCode":400}, - "exception":true - }, - "AutomationExecutionTimeoutException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The automation execution timed out.

", - "error":{ - "httpStatusCode":504, - "senderFault":true - }, - "exception":true - }, - "AwsUserArn":{ - "type":"string", - "max":2048, - "min":20 - }, - "BatchCreateTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "rowsToCreate" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook where the new rows are being added.

If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table where the new rows are being added.

If a table with the specified ID could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "rowsToCreate":{ - "shape":"CreateRowDataList", - "documentation":"

The list of rows to create at the end of the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request and the cells to create for that row. You need to specify at least one item in this list.

Note that if one of the column ids in any of the rows in the request does not exist in the table, then the request fails and no updates are made to the table.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the batch create operation. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the operation again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "BatchCreateTableRowsResult":{ - "type":"structure", - "required":[ - "workbookCursor", - "createdRows" - ], - "members":{ - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after adding the new rows at the end of the table.

" - }, - "createdRows":{ - "shape":"CreatedRowsMap", - "documentation":"

The map of batch item id to the row id that was created for that item.

" - }, - "failedBatchItems":{ - "shape":"FailedBatchItems", - "documentation":"

The list of batch items in the request that could not be added to the table. Each element in this list contains one item from the request that could not be added to the table along with the reason why that item could not be added.

" - } - } - }, - "BatchDeleteTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "rowIds" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook where the rows are being deleted.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table where the rows are being deleted.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "rowIds":{ - "shape":"RowIdList", - "documentation":"

The list of row ids to delete from the table. You need to specify at least one row id in this list.

Note that if one of the row ids provided in the request does not exist in the table, then the request fails and no rows are deleted from the table.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the delete action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "BatchDeleteTableRowsResult":{ - "type":"structure", - "required":["workbookCursor"], - "members":{ - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after deleting the rows from the table.

" - }, - "failedBatchItems":{ - "shape":"FailedBatchItems", - "documentation":"

The list of row ids in the request that could not be deleted from the table. Each element in this list contains one row id from the request that could not be deleted along with the reason why that item could not be deleted.

" - } - } - }, - "BatchErrorMessage":{ - "type":"string", - "pattern":"^(?!\\s*$).+" - }, - "BatchItemId":{ - "type":"string", - "max":64, - "min":1, - "pattern":"^(?!\\s*$).+" - }, - "BatchUpdateTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "rowsToUpdate" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook where the rows are being updated.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table where the rows are being updated.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "rowsToUpdate":{ - "shape":"UpdateRowDataList", - "documentation":"

The list of rows to update in the table. Each item in this list needs to contain the row id to update along with the map of column id to cell values for each column in that row that needs to be updated. You need to specify at least one row in this list, and for each row, you need to specify at least one column to update.

Note that if one of the row or column ids in the request does not exist in the table, then the request fails and no updates are made to the table.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "BatchUpdateTableRowsResult":{ - "type":"structure", - "required":["workbookCursor"], - "members":{ - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after adding the new rows at the end of the table.

" - }, - "failedBatchItems":{ - "shape":"FailedBatchItems", - "documentation":"

The list of batch items in the request that could not be updated in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated.

" - } - } - }, - "BatchUpsertTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "rowsToUpsert" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook where the rows are being upserted.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table where the rows are being upserted.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "rowsToUpsert":{ - "shape":"UpsertRowDataList", - "documentation":"

The list of rows to upsert in the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request, a filter expression to find the rows to update for that element and the cell values to set for each column in the upserted rows. You need to specify at least one item in this list.

Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the column ids in any of the rows does not exist in the table, then the request fails and no updates are made to the table.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "BatchUpsertTableRowsResult":{ - "type":"structure", - "required":[ - "rows", - "workbookCursor" - ], - "members":{ - "rows":{ - "shape":"UpsertRowsResultMap", - "documentation":"

A map with the batch item id as the key and the result of the upsert operation as the value. The result of the upsert operation specifies whether existing rows were updated or a new row was appended, along with the list of row ids that were affected.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after updating or appending rows in the table.

" - }, - "failedBatchItems":{ - "shape":"FailedBatchItems", - "documentation":"

The list of batch items in the request that could not be updated or appended in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated or appended.

" - } - } - }, - "Cell":{ - "type":"structure", - "members":{ - "formula":{ - "shape":"Formula", - "documentation":"

The formula contained in the cell. This field is empty if a cell does not have a formula.

" - }, - "format":{ - "shape":"Format", - "documentation":"

The format of the cell. If this field is empty, then the format is either not specified in the workbook or the format is set to AUTO.

" - }, - "rawValue":{ - "shape":"RawValue", - "documentation":"

The raw value of the data contained in the cell. The raw value depends on the format of the data in the cell. However the attribute in the API return value is always a string containing the raw value.

Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point number where the whole number represents the number of days since 1/1/1900 and the fractional part represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value \"44138\". A cell with the time 9:00 AM has the raw value \"0.375\" and a cell with date/time value of 11/3/2020 9:00 AM has the raw value \"44138.375\". Notice that even though the raw value is a number in all three cases, it is still represented as a string.

Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number representing the data being displayed. For example, the number 1.325 with two decimal places in the format will have it's raw value as \"1.325\" and formatted value as \"1.33\". A currency value for $10 will have the raw value as \"10\" and formatted value as \"$10.00\". A value representing 20% with two decimal places in the format will have its raw value as \"0.2\" and the formatted value as \"20.00%\". An accounting value of -$25 will have \"-25\" as the raw value and \"$ (25.00)\" as the formatted value.

Cells with format TEXT will have the raw text as the raw value. For example, a cell with text \"John Smith\" will have \"John Smith\" as both the raw value and the formatted value.

Cells with format CONTACT will have the name of the contact as a formatted value and the email address of the contact as the raw value. For example, a contact for John Smith will have \"John Smith\" as the formatted value and \"john.smith@example.com\" as the raw value.

Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table that displays task status might have \"Completed\" as the formatted value and \"row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.

Cells with format ROWSET (aka multi-select or multi-record picklist) will by default have the first column of each of the linked rows as the formatted value in the list, and the rowset id of the linked rows as the raw value. For example, a cell containing a multi-select picklist to a table that contains items might have \"Item A\", \"Item B\" in the formatted value list and \"rows:b742c1f4-6cb0-4650-a845-35eb86fcc2bb/ [fdea123b-8f68-474a-aa8a-5ff87aa333af,6daf41f0-a138-4eee-89da-123086d36ecf]\" as the raw value.

Cells with format ATTACHMENT will have the name of the attachment as the formatted value and the attachment id as the raw value. For example, a cell containing an attachment named \"image.jpeg\" will have \"image.jpeg\" as the formatted value and \"attachment:ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.

Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no auto-detected format, the raw and formatted values will be the same as the data in the cell.

" - }, - "formattedValue":{ - "shape":"FormattedValue", - "documentation":"

The formatted value of the cell. This is the value that you see displayed in the cell in the UI.

Note that the formatted value of a cell is always represented as a string irrespective of the data that is stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string representation of the formatted date being shown in the cell in the UI. See details in the rawValue field below for how cells of different formats will have different raw and formatted values.

" - }, - "formattedValues":{ - "shape":"FormattedValuesList", - "documentation":"

A list of formatted values of the cell. This field is only returned when the cell is ROWSET format (aka multi-select or multi-record picklist). Values in the list are always represented as strings. The formattedValue field will be empty if this field is returned.

" - } - }, - "documentation":"

An object that represents a single cell in a table.

", - "sensitive":true - }, - "CellInput":{ - "type":"structure", - "members":{ - "fact":{ - "shape":"Fact", - "documentation":"

Fact represents the data that is entered into a cell. This data can be free text or a formula. Formulas need to start with the equals (=) sign.

" - }, - "facts":{ - "shape":"FactList", - "documentation":"

A list representing the values that are entered into a ROWSET cell. Facts list can have either only values or rowIDs, and rowIDs should from the same table.

" - } - }, - "documentation":"

CellInput object contains the data needed to create or update cells in a table.

CellInput object has only a facts field or a fact field, but not both. A 400 bad request will be thrown if both fact and facts field are present.

" - }, - "Cells":{ - "type":"list", - "member":{"shape":"Cell"} - }, - "ClientRequestToken":{ - "type":"string", - "max":64, - "min":32, - "pattern":"^(?!\\s*$).+" - }, - "ColumnMetadata":{ - "type":"structure", - "required":[ - "name", - "format" - ], - "members":{ - "name":{ - "shape":"Name", - "documentation":"

The name of the column.

" - }, - "format":{ - "shape":"Format", - "documentation":"

The format of the column.

" - } - }, - "documentation":"

Metadata for column in the table.

" - }, - "CreateRowData":{ - "type":"structure", - "required":[ - "batchItemId", - "cellsToCreate" - ], - "members":{ - "batchItemId":{ - "shape":"BatchItemId", - "documentation":"

An external identifier that represents the single row that is being created as part of the BatchCreateTableRows request. This can be any string that you can use to identify the row in the request. The BatchCreateTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

" - }, - "cellsToCreate":{ - "shape":"RowDataInput", - "documentation":"

A map representing the cells to create in the new row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" - } - }, - "documentation":"

Data needed to create a single row in a table as part of the BatchCreateTableRows request.

" - }, - "CreateRowDataList":{ - "type":"list", - "member":{"shape":"CreateRowData"}, - "max":100, - "min":1 - }, - "CreatedRowsMap":{ - "type":"map", - "key":{"shape":"BatchItemId"}, - "value":{"shape":"RowId"} - }, - "DataItem":{ - "type":"structure", - "members":{ - "overrideFormat":{ - "shape":"Format", - "documentation":"

The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table.

" - }, - "rawValue":{ - "shape":"RawValue", - "documentation":"

The raw value of the data. e.g. jsmith@example.com

" - }, - "formattedValue":{ - "shape":"FormattedValue", - "documentation":"

The formatted value of the data. e.g. John Smith.

" - } - }, - "documentation":"

The data in a particular data cell defined on the screen.

", - "sensitive":true - }, - "DataItems":{ - "type":"list", - "member":{"shape":"DataItem"} - }, - "DelimitedTextDelimiter":{ - "type":"string", - "max":1, - "min":1, - "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]?$" - }, - "DelimitedTextImportOptions":{ - "type":"structure", - "required":["delimiter"], - "members":{ - "delimiter":{ - "shape":"DelimitedTextDelimiter", - "documentation":"

The delimiter to use for separating columns in a single row of the input.

" - }, - "hasHeaderRow":{ - "shape":"HasHeaderRow", - "documentation":"

Indicates whether the input file has a header row at the top containing the column names.

" - }, - "ignoreEmptyRows":{ - "shape":"IgnoreEmptyRows", - "documentation":"

A parameter to indicate whether empty rows should be ignored or be included in the import.

" - }, - "dataCharacterEncoding":{ - "shape":"ImportDataCharacterEncoding", - "documentation":"

The encoding of the data in the input file.

" - } - }, - "documentation":"

An object that contains the options relating to parsing delimited text as part of an import request.

" - }, - "DescribeTableDataImportJobRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "jobId" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook into which data was imported.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table into which data was imported.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "jobId":{ - "shape":"JobId", - "documentation":"

The ID of the job that was returned by the StartTableDataImportJob request.

If a job with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"jobId" - } - } - }, - "DescribeTableDataImportJobResult":{ - "type":"structure", - "required":[ - "jobStatus", - "message", - "jobMetadata" - ], - "members":{ - "jobStatus":{ - "shape":"TableDataImportJobStatus", - "documentation":"

The current status of the import job.

" - }, - "message":{ - "shape":"TableDataImportJobMessage", - "documentation":"

A message providing more details about the current status of the import job.

" - }, - "jobMetadata":{ - "shape":"TableDataImportJobMetadata", - "documentation":"

The metadata about the job that was submitted for import.

" - }, - "errorCode":{ - "shape":"ErrorCode", - "documentation":"

If job status is failed, error code to understand reason for the failure.

" - } - } - }, - "DestinationOptions":{ - "type":"structure", - "members":{ - "columnMap":{ - "shape":"ImportColumnMap", - "documentation":"

A map of the column id to the import properties for each column.

" - } - }, - "documentation":"

An object that contains the options relating to the destination of the import request.

" - }, - "Email":{ - "type":"string", - "max":254, - "min":3, - "pattern":"^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$", - "sensitive":true - }, - "ErrorCode":{ - "type":"string", - "enum":[ - "ACCESS_DENIED", - "INVALID_URL_ERROR", - "INVALID_IMPORT_OPTIONS_ERROR", - "INVALID_TABLE_ID_ERROR", - "INVALID_TABLE_COLUMN_ID_ERROR", - "TABLE_NOT_FOUND_ERROR", - "FILE_EMPTY_ERROR", - "INVALID_FILE_TYPE_ERROR", - "FILE_PARSING_ERROR", - "FILE_SIZE_LIMIT_ERROR", - "FILE_NOT_FOUND_ERROR", - "UNKNOWN_ERROR", - "RESOURCE_NOT_FOUND_ERROR", - "SYSTEM_LIMIT_ERROR" - ] - }, - "ErrorMessage":{"type":"string"}, - "Fact":{ - "type":"string", - "max":8192, - "min":0, - "pattern":"[\\s\\S]*", - "sensitive":true - }, - "FactList":{ - "type":"list", - "member":{"shape":"Fact"}, - "max":220, - "min":0 - }, - "FailedBatchItem":{ - "type":"structure", - "required":[ - "id", - "errorMessage" - ], - "members":{ - "id":{ - "shape":"BatchItemId", - "documentation":"

The id of the batch item that failed. This is the batch item id for the BatchCreateTableRows and BatchUpsertTableRows operations and the row id for the BatchUpdateTableRows and BatchDeleteTableRows operations.

" - }, - "errorMessage":{ - "shape":"BatchErrorMessage", - "documentation":"

The error message that indicates why the batch item failed.

" - } - }, - "documentation":"

A single item in a batch that failed to perform the intended action because of an error preventing it from succeeding.

" - }, - "FailedBatchItems":{ - "type":"list", - "member":{"shape":"FailedBatchItem"}, - "max":100, - "min":0 - }, - "Filter":{ - "type":"structure", - "required":["formula"], - "members":{ - "formula":{ - "shape":"Formula", - "documentation":"

A formula representing a filter function that returns zero or more matching rows from a table. Valid formulas in this field return a list of rows from a table. The most common ways of writing a formula to return a list of rows are to use the FindRow() or Filter() functions. Any other formula that returns zero or more rows is also acceptable. For example, you can use a formula that points to a cell that contains a filter function.

" - }, - "contextRowId":{ - "shape":"RowId", - "documentation":"

The optional contextRowId attribute can be used to specify the row id of the context row if the filter formula contains unqualified references to table columns and needs a context row to evaluate them successfully.

" - } - }, - "documentation":"

An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

" - }, - "Format":{ - "type":"string", - "enum":[ - "AUTO", - "NUMBER", - "CURRENCY", - "DATE", - "TIME", - "DATE_TIME", - "PERCENTAGE", - "TEXT", - "ACCOUNTING", - "CONTACT", - "ROWLINK", - "ROWSET" - ] - }, - "FormattedValue":{ - "type":"string", - "max":8192, - "min":0, - "pattern":"[\\s\\S]*" - }, - "FormattedValuesList":{ - "type":"list", - "member":{"shape":"FormattedValue"}, - "max":220, - "min":0 - }, - "Formula":{ - "type":"string", - "max":8192, - "min":0, - "pattern":"^=.*", - "sensitive":true - }, - "GetScreenDataRequest":{ - "type":"structure", - "required":[ - "workbookId", - "appId", - "screenId" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the screen.

" - }, - "appId":{ - "shape":"ResourceId", - "documentation":"

The ID of the app that contains the screen.

" - }, - "screenId":{ - "shape":"ResourceId", - "documentation":"

The ID of the screen.

" - }, - "variables":{ - "shape":"VariableValueMap", - "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" - }, - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100.

This parameter is optional. If you don't specify this parameter, the default page size is 100.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" - } - } - }, - "GetScreenDataResult":{ - "type":"structure", - "required":[ - "results", - "workbookCursor" - ], - "members":{ - "results":{ - "shape":"ResultSetMap", - "documentation":"

A map of all the rows on the screen keyed by block name.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded.

" - } - } - }, - "HasHeaderRow":{"type":"boolean"}, - "IgnoreEmptyRows":{"type":"boolean"}, - "ImportColumnMap":{ - "type":"map", - "key":{"shape":"ResourceId"}, - "value":{"shape":"SourceDataColumnProperties"}, - "max":100 - }, - "ImportDataCharacterEncoding":{ - "type":"string", - "enum":[ - "UTF-8", - "US-ASCII", - "ISO-8859-1", - "UTF-16BE", - "UTF-16LE", - "UTF-16" - ] - }, - "ImportDataSource":{ - "type":"structure", - "required":["dataSourceConfig"], - "members":{ - "dataSourceConfig":{ - "shape":"ImportDataSourceConfig", - "documentation":"

The configuration parameters for the data source of the import

" - } - }, - "documentation":"

An object that has details about the source of the data that was submitted for import.

" - }, - "ImportDataSourceConfig":{ - "type":"structure", - "members":{ - "dataSourceUrl":{ - "shape":"SecureURL", - "documentation":"

The URL from which source data will be downloaded for the import request.

" - } - }, - "documentation":"

An object that contains the configuration parameters for the data source of an import request.

" - }, - "ImportJobSubmitter":{ - "type":"structure", - "members":{ - "email":{ - "shape":"Email", - "documentation":"

The email id of the submitter of the import job, if available.

" - }, - "userArn":{ - "shape":"AwsUserArn", - "documentation":"

The AWS user ARN of the submitter of the import job, if available.

" - } - }, - "documentation":"

An object that contains the attributes of the submitter of the import job.

" - }, - "ImportOptions":{ - "type":"structure", - "members":{ - "destinationOptions":{ - "shape":"DestinationOptions", - "documentation":"

Options relating to the destination of the import request.

" - }, - "delimitedTextOptions":{ - "shape":"DelimitedTextImportOptions", - "documentation":"

Options relating to parsing delimited text. Required if dataFormat is DELIMITED_TEXT.

" - } - }, - "documentation":"

An object that contains the options specified by the sumitter of the import request.

" - }, - "ImportSourceDataFormat":{ - "type":"string", - "enum":["DELIMITED_TEXT"] - }, - "InternalServerException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

There were unexpected errors from the server.

", - "error":{"httpStatusCode":500}, - "exception":true - }, - "InvokeScreenAutomationRequest":{ - "type":"structure", - "required":[ - "workbookId", - "appId", - "screenId", - "screenAutomationId" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the screen automation.

", - "location":"uri", - "locationName":"workbookId" - }, - "appId":{ - "shape":"ResourceId", - "documentation":"

The ID of the app that contains the screen automation.

", - "location":"uri", - "locationName":"appId" - }, - "screenId":{ - "shape":"ResourceId", - "documentation":"

The ID of the screen that contains the screen automation.

", - "location":"uri", - "locationName":"screenId" - }, - "screenAutomationId":{ - "shape":"ResourceId", - "documentation":"

The ID of the automation action to be performed.

", - "location":"uri", - "locationName":"automationId" - }, - "variables":{ - "shape":"VariableValueMap", - "documentation":"

Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen. Any variables defined in a screen are required to be passed in the call.

" - }, - "rowId":{ - "shape":"RowId", - "documentation":"

The row ID for the automation if the automation is defined inside a block with source or list.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "InvokeScreenAutomationResult":{ - "type":"structure", - "required":["workbookCursor"], - "members":{ - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after performing the automation action.

" - } - } - }, - "JobId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - }, - "ListTableColumnsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the table whose columns are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table whose columns are being retrieved.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

", - "location":"querystring", - "locationName":"nextToken" - } - } - }, - "ListTableColumnsResult":{ - "type":"structure", - "required":["tableColumns"], - "members":{ - "tableColumns":{ - "shape":"TableColumns", - "documentation":"

The list of columns in the table.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" - } - } - }, - "ListTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the table whose rows are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table whose rows are being retrieved.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "rowIds":{ - "shape":"RowIdList", - "documentation":"

This parameter is optional. If one or more row ids are specified in this list, then only the specified row ids are returned in the result. If no row ids are specified here, then all the rows in the table are returned.

" - }, - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of rows to return in each page of the results.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" - } - } - }, - "ListTableRowsResult":{ - "type":"structure", - "required":[ - "columnIds", - "rows", - "workbookCursor" - ], - "members":{ - "columnIds":{ - "shape":"ResourceIds", - "documentation":"

The list of columns in the table whose row data is returned in the result.

" - }, - "rows":{ - "shape":"TableRows", - "documentation":"

The list of rows in the table. Note that this result is paginated, so this list contains a maximum of 100 rows.

" - }, - "rowIdsNotFound":{ - "shape":"RowIdList", - "documentation":"

The list of row ids included in the request that were not found in the table.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" - } - } - }, - "ListTablesRequest":{ - "type":"structure", - "required":["workbookId"], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook whose tables are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of tables to return in each page of the results.

", - "location":"querystring", - "locationName":"maxResults" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

", - "location":"querystring", - "locationName":"nextToken" - } - } - }, - "ListTablesResult":{ - "type":"structure", - "required":["tables"], - "members":{ - "tables":{ - "shape":"Tables", - "documentation":"

The list of tables in the workbook.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" - } - } - }, - "ListTagsForResourceRequest":{ - "type":"structure", - "required":["resourceArn"], - "members":{ - "resourceArn":{ - "shape":"ResourceArn", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"resourceArn" - } - } - }, - "ListTagsForResourceResult":{ - "type":"structure", - "members":{ - "tags":{ - "shape":"TagsMap", - "documentation":"

The resource's tags.

" - } - } - }, - "MaxResults":{ - "type":"integer", - "box":true, - "max":100, - "min":1 - }, - "Name":{ - "type":"string", - "sensitive":true - }, - "PaginationToken":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"^(?!\\s*$).+" - }, - "QueryTableRowsRequest":{ - "type":"structure", - "required":[ - "workbookId", - "tableId", - "filterFormula" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook whose table rows are being queried.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "tableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table whose rows are being queried.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "filterFormula":{ - "shape":"Filter", - "documentation":"

An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

" - }, - "maxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of rows to return in each page of the results.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" - } - } - }, - "QueryTableRowsResult":{ - "type":"structure", - "required":[ - "columnIds", - "rows", - "workbookCursor" - ], - "members":{ - "columnIds":{ - "shape":"ResourceIds", - "documentation":"

The list of columns in the table whose row data is returned in the result.

" - }, - "rows":{ - "shape":"TableRows", - "documentation":"

The list of rows in the table that match the query filter.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" - } - } - }, - "RawValue":{ - "type":"string", - "max":32767, - "min":0, - "pattern":"[\\s\\S]*" - }, - "RequestTimeoutException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The request timed out.

", - "error":{ - "httpStatusCode":504, - "senderFault":true - }, - "exception":true - }, - "ResourceArn":{ - "type":"string", - "max":256, - "min":1, - "pattern":"^arn:aws:honeycode:.+:[0-9]{12}:.+:.+$" - }, - "ResourceId":{ - "type":"string", - "max":36, - "min":36, - "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" - }, - "ResourceIds":{ - "type":"list", - "member":{"shape":"ResourceId"}, - "max":100, - "min":1 - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

A Workbook, Table, App, Screen or Screen Automation was not found with the given ID.

", - "error":{"httpStatusCode":404}, - "exception":true - }, - "ResultHeader":{ - "type":"list", - "member":{"shape":"ColumnMetadata"} - }, - "ResultRow":{ - "type":"structure", - "required":["dataItems"], - "members":{ - "rowId":{ - "shape":"RowId", - "documentation":"

The ID for a particular row.

" - }, - "dataItems":{ - "shape":"DataItems", - "documentation":"

List of all the data cells in a row.

" - } - }, - "documentation":"

A single row in the ResultSet.

" - }, - "ResultRows":{ - "type":"list", - "member":{"shape":"ResultRow"} - }, - "ResultSet":{ - "type":"structure", - "required":[ - "headers", - "rows" - ], - "members":{ - "headers":{ - "shape":"ResultHeader", - "documentation":"

List of headers for all the data cells in the block. The header identifies the name and default format of the data cell. Data cells appear in the same order in all rows as defined in the header. The names and formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank value is used.

For example, a task list that displays the task name, due date and assigned person might have headers [ { \"name\": \"Task Name\"}, {\"name\": \"Due Date\", \"format\": \"DATE\"}, {\"name\": \"Assigned\", \"format\": \"CONTACT\"} ]. Every row in the result will have the task name as the first item, due date as the second item and assigned person as the third item. If a particular task does not have a due date, that row will still have a blank value in the second element and the assigned person will still be in the third element.

" - }, - "rows":{ - "shape":"ResultRows", - "documentation":"

List of rows returned by the request. Each row has a row Id and a list of data cells in that row. The data cells will be present in the same order as they are defined in the header.

" - } - }, - "documentation":"

ResultSet contains the results of the request for a single block or list defined on the screen.

" - }, - "ResultSetMap":{ - "type":"map", - "key":{"shape":"Name"}, - "value":{"shape":"ResultSet"} - }, - "RowDataInput":{ - "type":"map", - "key":{"shape":"ResourceId"}, - "value":{"shape":"CellInput"}, - "max":100, - "min":1 - }, - "RowId":{ - "type":"string", - "max":77, - "min":77, - "pattern":"row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" - }, - "RowIdList":{ - "type":"list", - "member":{"shape":"RowId"}, - "max":100, - "min":1 - }, - "SecureURL":{ - "type":"string", - "max":8000, - "min":1, - "pattern":"^https:\\/\\/[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$", - "sensitive":true - }, - "ServiceQuotaExceededException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The request caused service quota to be breached.

", - "error":{"httpStatusCode":402}, - "exception":true - }, - "ServiceUnavailableException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Remote service is unreachable.

", - "error":{"httpStatusCode":503}, - "exception":true - }, - "SourceDataColumnIndex":{ - "type":"integer", - "min":1 - }, - "SourceDataColumnProperties":{ - "type":"structure", - "members":{ - "columnIndex":{ - "shape":"SourceDataColumnIndex", - "documentation":"

The index of the column in the input file.

" - } - }, - "documentation":"

An object that contains the properties for importing data to a specific column in a table.

" - }, - "StartTableDataImportJobRequest":{ - "type":"structure", - "required":[ - "workbookId", - "dataSource", - "dataFormat", - "destinationTableId", - "importOptions", - "clientRequestToken" - ], - "members":{ - "workbookId":{ - "shape":"ResourceId", - "documentation":"

The ID of the workbook where the rows are being imported.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"workbookId" - }, - "dataSource":{ - "shape":"ImportDataSource", - "documentation":"

The source of the data that is being imported. The size of source must be no larger than 100 MB. Source must have no more than 100,000 cells and no more than 1,000 rows.

" - }, - "dataFormat":{ - "shape":"ImportSourceDataFormat", - "documentation":"

The format of the data that is being imported. Currently the only option supported is \"DELIMITED_TEXT\".

" - }, - "destinationTableId":{ - "shape":"ResourceId", - "documentation":"

The ID of the table where the rows are being imported.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", - "location":"uri", - "locationName":"tableId" - }, - "importOptions":{ - "shape":"ImportOptions", - "documentation":"

The options for customizing this import request.

" - }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" - } - } - }, - "StartTableDataImportJobResult":{ - "type":"structure", - "required":[ - "jobId", - "jobStatus" - ], - "members":{ - "jobId":{ - "shape":"JobId", - "documentation":"

The id that is assigned to this import job. Future requests to find out the status of this import job need to send this id in the appropriate parameter in the request.

" - }, - "jobStatus":{ - "shape":"TableDataImportJobStatus", - "documentation":"

The status of the import job immediately after submitting the request.

" - } - } - }, - "Table":{ - "type":"structure", - "members":{ - "tableId":{ - "shape":"ResourceId", - "documentation":"

The id of the table.

" - }, - "tableName":{ - "shape":"TableName", - "documentation":"

The name of the table.

" - } - }, - "documentation":"

An object representing the properties of a table in a workbook.

" - }, - "TableColumn":{ - "type":"structure", - "members":{ - "tableColumnId":{ - "shape":"ResourceId", - "documentation":"

The id of the column in the table.

" - }, - "tableColumnName":{ - "shape":"TableColumnName", - "documentation":"

The name of the column in the table.

" - }, - "format":{ - "shape":"Format", - "documentation":"

The column level format that is applied in the table. An empty value in this field means that the column format is the default value 'AUTO'.

" - } - }, - "documentation":"

An object that contains attributes about a single column in a table

" - }, - "TableColumnName":{"type":"string"}, - "TableColumns":{ - "type":"list", - "member":{"shape":"TableColumn"} - }, - "TableDataImportJobMessage":{"type":"string"}, - "TableDataImportJobMetadata":{ - "type":"structure", - "required":[ - "submitter", - "submitTime", - "importOptions", - "dataSource" - ], - "members":{ - "submitter":{ - "shape":"ImportJobSubmitter", - "documentation":"

Details about the submitter of the import request.

" - }, - "submitTime":{ - "shape":"TimestampInMillis", - "documentation":"

The timestamp when the job was submitted for import.

" - }, - "importOptions":{ - "shape":"ImportOptions", - "documentation":"

The options that was specified at the time of submitting the import request.

" - }, - "dataSource":{ - "shape":"ImportDataSource", - "documentation":"

The source of the data that was submitted for import.

" - } - }, - "documentation":"

The metadata associated with the table data import job that was submitted.

" - }, - "TableDataImportJobStatus":{ - "type":"string", - "enum":[ - "SUBMITTED", - "IN_PROGRESS", - "COMPLETED", - "FAILED" - ] - }, - "TableName":{"type":"string"}, - "TableRow":{ - "type":"structure", - "required":[ - "rowId", - "cells" - ], - "members":{ - "rowId":{ - "shape":"RowId", - "documentation":"

The id of the row in the table.

" - }, - "cells":{ - "shape":"Cells", - "documentation":"

A list of cells in the table row. The cells appear in the same order as the columns of the table.

" - } - }, - "documentation":"

An object that contains attributes about a single row in a table

" - }, - "TableRows":{ - "type":"list", - "member":{"shape":"TableRow"} - }, - "Tables":{ - "type":"list", - "member":{"shape":"Table"} - }, - "TagKey":{ - "type":"string", - "max":100, - "min":1, - "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - }, - "TagKeysList":{ - "type":"list", - "member":{"shape":"TagKey"}, - "documentation":"

A list of tag keys

", - "max":100 - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "resourceArn", - "tags" - ], - "members":{ - "resourceArn":{ - "shape":"ResourceArn", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"resourceArn" - }, - "tags":{ - "shape":"TagsMap", - "documentation":"

A list of tags to apply to the resource.

" - } - } - }, - "TagResourceResult":{ - "type":"structure", - "members":{ - } - }, - "TagValue":{ - "type":"string", - "max":100, - "min":1, - "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - }, - "TagsMap":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"}, - "documentation":"

A string to string map representing tags

", - "max":100 - }, - "ThrottlingException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Tps(transactions per second) rate reached.

", - "error":{"httpStatusCode":429}, - "exception":true - }, - "TimestampInMillis":{"type":"timestamp"}, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "resourceArn", - "tagKeys" - ], - "members":{ - "resourceArn":{ - "shape":"ResourceArn", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"resourceArn" - }, - "tagKeys":{ - "shape":"TagKeysList", - "documentation":"

A list of tag keys to remove from the resource.

", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UntagResourceResult":{ - "type":"structure", - "members":{ - } - }, - "UpdateRowData":{ - "type":"structure", - "required":[ - "rowId", - "cellsToUpdate" - ], - "members":{ - "rowId":{ - "shape":"RowId", - "documentation":"

The id of the row that needs to be updated.

" - }, - "cellsToUpdate":{ - "shape":"RowDataInput", - "documentation":"

A map representing the cells to update in the given row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" - } - }, - "documentation":"

Data needed to create a single row in a table as part of the BatchCreateTableRows request.

" - }, - "UpdateRowDataList":{ - "type":"list", - "member":{"shape":"UpdateRowData"}, - "max":100, - "min":1 - }, - "UpsertAction":{ - "type":"string", - "enum":[ - "UPDATED", - "APPENDED" - ] - }, - "UpsertRowData":{ - "type":"structure", - "required":[ - "batchItemId", - "filter", - "cellsToUpdate" - ], - "members":{ - "batchItemId":{ - "shape":"BatchItemId", - "documentation":"

An external identifier that represents a single item in the request that is being upserted as part of the BatchUpsertTableRows request. This can be any string that you can use to identify the item in the request. The BatchUpsertTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

" - }, - "filter":{ - "shape":"Filter", - "documentation":"

The filter formula to use to find existing matching rows to update. The formula needs to return zero or more rows. If the formula returns 0 rows, then a new row will be appended in the target table. If the formula returns one or more rows, then the returned rows will be updated.

Note that the filter formula needs to return rows from the target table for the upsert operation to succeed. If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are made to the table.

" - }, - "cellsToUpdate":{ - "shape":"RowDataInput", - "documentation":"

A map representing the cells to update for the matching rows or an appended row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" - } - }, - "documentation":"

Data needed to upsert rows in a table as part of a single item in the BatchUpsertTableRows request.

" - }, - "UpsertRowDataList":{ - "type":"list", - "member":{"shape":"UpsertRowData"} - }, - "UpsertRowsResult":{ - "type":"structure", - "required":[ - "rowIds", - "upsertAction" - ], - "members":{ - "rowIds":{ - "shape":"RowIdList", - "documentation":"

The list of row ids that were changed as part of an upsert row operation. If the upsert resulted in an update, this list could potentially contain multiple rows that matched the filter and hence got updated. If the upsert resulted in an append, this list would only have the single row that was appended.

" - }, - "upsertAction":{ - "shape":"UpsertAction", - "documentation":"

The result of the upsert action.

" - } - }, - "documentation":"

An object that represents the result of a single upsert row request.

" - }, - "UpsertRowsResultMap":{ - "type":"map", - "key":{"shape":"BatchItemId"}, - "value":{"shape":"UpsertRowsResult"} - }, - "ValidationException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Request is invalid. The message in the response contains details on why the request is invalid.

", - "error":{"httpStatusCode":400}, - "exception":true - }, - "VariableName":{ - "type":"string", - "max":255, - "min":1, - "pattern":"^(?!\\s*$).+", - "sensitive":true - }, - "VariableValue":{ - "type":"structure", - "required":["rawValue"], - "members":{ - "rawValue":{ - "shape":"RawValue", - "documentation":"

Raw value of the variable.

" - } - }, - "documentation":"

The input variables to the app to be used by the InvokeScreenAutomation action request.

", - "sensitive":true - }, - "VariableValueMap":{ - "type":"map", - "key":{"shape":"VariableName"}, - "value":{"shape":"VariableValue"}, - "sensitive":true - }, - "WorkbookCursor":{"type":"long"} - }, - "documentation":"

Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams—without programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals, resources, and even your team.

" -} diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index b234fae85f..ae76ce0d36 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"iam", "globalEndpoint":"iam.amazonaws.com", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"IAM", "serviceFullName":"AWS Identity and Access Management", "serviceId":"IAM", "signatureVersion":"v4", "uid":"iam-2010-05-08", - "xmlNamespace":"https://iam.amazonaws.com/doc/2010-05-08/" + "xmlNamespace":"https://iam.amazonaws.com/doc/2010-05-08/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddClientIDToOpenIDConnectProvider":{ @@ -235,7 +237,7 @@ {"shape":"ServiceFailureException"}, {"shape":"OpenIdIdpCommunicationErrorException"} ], - "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

  • A list of tags that are attached to the specified IAM OIDC provider

  • A list of thumbprints of one or more server certificates that the IdP uses

You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" + "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

  • A list of tags that are attached to the specified IAM OIDC provider

  • A list of thumbprints of one or more server certificates that the IdP uses

You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -1334,7 +1336,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide.

" + "documentation":"

Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the IAM User Guide.

" }, "ListAttachedGroupPolicies":{ "name":"ListAttachedGroupPolicies", @@ -2424,7 +2426,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

" + "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

" }, "UpdateRole":{ "name":"UpdateRole", @@ -2698,7 +2700,6 @@ "AccessKeyLastUsed":{ "type":"structure", "required":[ - "LastUsedDate", "ServiceName", "Region" ], diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json index 3e67a6dd61..b3751ef166 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"imagebuilder", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"imagebuilder", "serviceFullName":"EC2 Image Builder", "serviceId":"imagebuilder", @@ -1705,7 +1706,7 @@ }, "ComponentBuildVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" }, "ComponentConfiguration":{ "type":"structure", @@ -1947,11 +1948,11 @@ }, "ComponentVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" }, "ComponentVersionArnOrBuildVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" }, "ComponentVersionList":{ "type":"list", @@ -4043,11 +4044,11 @@ }, "ImageBuildVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + 
"pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" }, "ImageBuilderArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$" }, "ImagePackage":{ "type":"structure", @@ -4622,11 +4623,11 @@ }, "ImageVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" }, "ImageVersionArnOrBuildVersionArn":{ "type":"string", - "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" }, "ImageVersionList":{ "type":"list", @@ -7641,7 +7642,7 @@ "WorkflowBuildVersionArn":{ "type":"string", "max":1024, - "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + 
"pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" }, "WorkflowConfiguration":{ "type":"structure", @@ -7753,7 +7754,7 @@ }, "WorkflowNameArn":{ "type":"string", - "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/x\\.x\\.x$" + "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/x\\.x\\.x$" }, "WorkflowParameter":{ "type":"structure", @@ -8096,11 +8097,11 @@ }, "WorkflowVersionArn":{ "type":"string", - "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" }, "WorkflowVersionArnOrBuildVersionArn":{ "type":"string", - "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" }, "WorkflowVersionList":{ "type":"list", @@ -8108,7 +8109,7 @@ }, "WorkflowWildcardVersionArn":{ "type":"string", - "pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$" + 
"pattern":"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$" } }, "documentation":"

EC2 Image Builder is a fully managed Amazon Web Services service that makes it easier to automate the creation, management, and deployment of customized, secure, and up-to-date \"golden\" server images that are pre-installed and pre-configured with software and settings to meet specific IT standards.

" diff --git a/botocore/data/inspector2/2020-06-08/service-2.json b/botocore/data/inspector2/2020-06-08/service-2.json index 9c28347bc9..6e1551274f 100644 --- a/botocore/data/inspector2/2020-06-08/service-2.json +++ b/botocore/data/inspector2/2020-06-08/service-2.json @@ -7849,7 +7849,7 @@ }, "TagValueList":{ "type":"list", - "member":{"shape":"String"}, + "member":{"shape":"TargetResourceTagsValue"}, "max":5, "min":1 }, @@ -7870,11 +7870,22 @@ }, "TargetResourceTags":{ "type":"map", - "key":{"shape":"NonEmptyString"}, + "key":{"shape":"TargetResourceTagsKey"}, "value":{"shape":"TagValueList"}, "max":5, "min":1 }, + "TargetResourceTagsKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:/=\\-@]*$" + }, + "TargetResourceTagsValue":{ + "type":"string", + "max":256, + "min":1 + }, "TargetStatusFilterList":{ "type":"list", "member":{"shape":"CisTargetStatusFilter"}, diff --git a/botocore/data/iotfleetwise/2021-06-17/service-2.json b/botocore/data/iotfleetwise/2021-06-17/service-2.json index 26960e3e92..655b9f2727 100644 --- a/botocore/data/iotfleetwise/2021-06-17/service-2.json +++ b/botocore/data/iotfleetwise/2021-06-17/service-2.json @@ -1419,7 +1419,7 @@ }, "signalCatalogArn":{ "shape":"arn", - "documentation":"

(Optional) The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.

" + "documentation":"

The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.

" }, "targetArn":{ "shape":"arn", @@ -3251,6 +3251,14 @@ "shape":"arn", "documentation":"

The Amazon Resource Name (ARN) of a vehicle model (model manifest). You can use this optional parameter to list only the vehicles created from a certain vehicle model.

" }, + "attributeNames":{ + "shape":"attributeNamesList", + "documentation":"

The fully qualified names of the attributes. For example, the fully qualified name of an attribute might be Vehicle.Body.Engine.Type.

" + }, + "attributeValues":{ + "shape":"attributeValuesList", + "documentation":"

Static information about a vehicle attribute value in string format. For example:

\"1.3 L R2\"

" + }, "nextToken":{ "shape":"nextToken", "documentation":"

A pagination token for the next set of results.

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" @@ -4884,7 +4892,19 @@ "min":1, "pattern":"[a-zA-Z0-9_.-]+" }, + "attributeNamesList":{ + "type":"list", + "member":{"shape":"attributeName"}, + "max":5, + "min":1 + }, "attributeValue":{"type":"string"}, + "attributeValuesList":{ + "type":"list", + "member":{"shape":"attributeValue"}, + "max":5, + "min":1 + }, "attributesMap":{ "type":"map", "key":{"shape":"attributeName"}, diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index efc72d8ec2..725291f7ee 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"iotsitewise", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS IoT SiteWise", "serviceId":"IoTSiteWise", "signatureVersion":"v4", "signingName":"iotsitewise", - "uid":"iotsitewise-2019-12-02" + "uid":"iotsitewise-2019-12-02", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAssets":{ @@ -438,6 +440,7 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, {"shape":"InternalFailureException"}, {"shape":"ThrottlingException"} ], @@ -1398,7 +1401,7 @@ {"shape":"ThrottlingException"}, {"shape":"ConflictingOperationException"} ], - "documentation":"

Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide.

This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.

If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.

", + "documentation":"

Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide.

If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. You can’t change the type or data type of an existing property.

To replace an existing asset model property with a new one with the same name, do the following:

  1. Submit an UpdateAssetModel request with the entire existing property removed.

  2. Submit a second UpdateAssetModel request that includes the new property. The new asset property will have the same name as the previous one and IoT SiteWise will generate a new unique id.

", "endpoint":{"hostPrefix":"api."} }, "UpdateAssetModelCompositeModel":{ @@ -2017,7 +2020,7 @@ "members":{ "id":{ "shape":"ID", - "documentation":"

The ID of the the composite model that this summary describes..

" + "documentation":"

The ID of the composite model that this summary describes.

" }, "externalId":{ "shape":"ExternalId", @@ -2025,15 +2028,15 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the the composite model that this summary describes..

" + "documentation":"

The name of the composite model that this summary describes.

" }, "type":{ "shape":"Name", - "documentation":"

The type of asset model.

  • ASSET_MODEL – (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.

  • COMPONENT_MODEL – A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.

" + "documentation":"

The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY.

" }, "description":{ "shape":"Description", - "documentation":"

The description of the the composite model that this summary describes..

" + "documentation":"

The description of the composite model that this summary describes.

" }, "path":{ "shape":"AssetModelCompositeModelPath", @@ -3469,7 +3472,7 @@ }, "CapabilityConfiguration":{ "type":"string", - "max":104857600, + "max":10000000, "min":1 }, "CapabilityNamespace":{ @@ -3484,7 +3487,8 @@ "IN_SYNC", "OUT_OF_SYNC", "SYNC_FAILED", - "UNKNOWN" + "UNKNOWN", + "NOT_APPLICABLE" ] }, "ClientToken":{ @@ -3690,7 +3694,8 @@ "CoreDeviceThingName":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^[a-zA-Z0-9:_-]+$" }, "CreateAccessPolicyRequest":{ "type":"structure", @@ -3772,7 +3777,7 @@ }, "assetModelCompositeModelName":{ "shape":"Name", - "documentation":"

A unique, friendly name for the composite model.

" + "documentation":"

A unique name for the composite model.

" }, "assetModelCompositeModelType":{ "shape":"Name", @@ -3785,11 +3790,11 @@ }, "composedAssetModelId":{ "shape":"CustomID", - "documentation":"

The ID of a composite model on this asset.

" + "documentation":"

The ID of a component model which is reused to create this composite model.

" }, "assetModelCompositeModelProperties":{ "shape":"AssetModelPropertyDefinitions", - "documentation":"

The property definitions of the composite model. For more information, see <LINK>.

You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide.

" + "documentation":"

The property definitions of the composite model. For more information, see Inline custom composite models in the IoT SiteWise User Guide.

You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide.

" } } }, @@ -3818,7 +3823,7 @@ "members":{ "assetModelName":{ "shape":"Name", - "documentation":"

A unique, friendly name for the asset model.

" + "documentation":"

A unique name for the asset model.

" }, "assetModelDescription":{ "shape":"Description", @@ -3834,7 +3839,7 @@ }, "assetModelCompositeModels":{ "shape":"AssetModelCompositeModelDefinitions", - "documentation":"

The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see <LINK>.

" + "documentation":"

The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide.

" }, "clientToken":{ "shape":"ClientToken", @@ -4063,8 +4068,8 @@ ], "members":{ "gatewayName":{ - "shape":"Name", - "documentation":"

A unique, friendly name for the gateway.

" + "shape":"GatewayName", + "documentation":"

A unique name for the gateway.

" }, "gatewayPlatform":{ "shape":"GatewayPlatform", @@ -5276,7 +5281,7 @@ "documentation":"

The ID of the gateway device.

" }, "gatewayName":{ - "shape":"Name", + "shape":"GatewayName", "documentation":"

The name of the gateway.

" }, "gatewayArn":{ @@ -5938,6 +5943,12 @@ }, "documentation":"

Contains a summary of a gateway capability configuration.

" }, + "GatewayName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, "GatewayPlatform":{ "type":"structure", "members":{ @@ -5948,6 +5959,10 @@ "greengrassV2":{ "shape":"GreengrassV2", "documentation":"

A gateway that runs on IoT Greengrass V2.

" + }, + "siemensIE":{ + "shape":"SiemensIE", + "documentation":"

A SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.

" } }, "documentation":"

Contains a gateway's platform information.

" @@ -5970,8 +5985,8 @@ "documentation":"

The ID of the gateway device.

" }, "gatewayName":{ - "shape":"Name", - "documentation":"

The name of the asset.

" + "shape":"GatewayName", + "documentation":"

The name of the gateway.

" }, "gatewayPlatform":{"shape":"GatewayPlatform"}, "gatewayCapabilitySummaries":{ @@ -6303,7 +6318,7 @@ "members":{ "groupArn":{ "shape":"ARN", - "documentation":"

The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the IoT Greengrass API Reference.

" + "documentation":"

The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the IoT Greengrass V1 API Reference.

" } }, "documentation":"

Contains details for a gateway that runs on IoT Greengrass. To create a gateway that runs on IoT Greengrass, you must add the IoT SiteWise connector to a Greengrass group and deploy it. Your Greengrass group must also have permissions to upload data to IoT SiteWise. For more information, see Ingesting data using a gateway in the IoT SiteWise User Guide.

" @@ -6513,6 +6528,12 @@ "error":{"httpStatusCode":400}, "exception":true }, + "IotCoreThingName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9:_-]+$" + }, "JobConfiguration":{ "type":"structure", "required":["fileFormat"], @@ -8075,6 +8096,17 @@ "exception":true, "fault":true }, + "SiemensIE":{ + "type":"structure", + "required":["iotCoreThingName"], + "members":{ + "iotCoreThingName":{ + "shape":"IotCoreThingName", + "documentation":"

The name of the IoT Thing for your SiteWise Edge gateway.

" + } + }, + "documentation":"

Contains details for a SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.

" + }, "StorageType":{ "type":"string", "enum":[ @@ -8424,7 +8456,7 @@ }, "assetModelCompositeModelName":{ "shape":"Name", - "documentation":"

A unique, friendly name for the composite model.

" + "documentation":"

A unique name for the composite model.

" }, "clientToken":{ "shape":"ClientToken", @@ -8433,7 +8465,7 @@ }, "assetModelCompositeModelProperties":{ "shape":"AssetModelProperties", - "documentation":"

The property definitions of the composite model. For more information, see <LINK>.

You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide.

" + "documentation":"

The property definitions of the composite model. For more information, see Inline custom composite models in the IoT SiteWise User Guide.

You can specify up to 200 properties per composite model. For more information, see Quotas in the IoT SiteWise User Guide.

" } } }, @@ -8466,7 +8498,7 @@ }, "assetModelName":{ "shape":"Name", - "documentation":"

A unique, friendly name for the asset model.

" + "documentation":"

A unique name for the asset model.

" }, "assetModelDescription":{ "shape":"Description", @@ -8482,7 +8514,7 @@ }, "assetModelCompositeModels":{ "shape":"AssetModelCompositeModels", - "documentation":"

The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see <LINK>.

" + "documentation":"

The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.

When creating custom composite models, you need to use CreateAssetModelCompositeModel. For more information, see Creating custom composite models (Components) in the IoT SiteWise User Guide.

" }, "clientToken":{ "shape":"ClientToken", @@ -8678,8 +8710,8 @@ "locationName":"gatewayId" }, "gatewayName":{ - "shape":"Name", - "documentation":"

A unique, friendly name for the gateway.

" + "shape":"GatewayName", + "documentation":"

A unique name for the gateway.

" } } }, @@ -8832,7 +8864,7 @@ }, "integerValue":{ "shape":"PropertyValueIntegerValue", - "documentation":"

Asset property data of type integer (number that's greater than or equal to zero).

" + "documentation":"

Asset property data of type integer (whole number).

" }, "doubleValue":{ "shape":"PropertyValueDoubleValue", diff --git a/botocore/data/iottwinmaker/2021-11-29/service-2.json b/botocore/data/iottwinmaker/2021-11-29/service-2.json index de4d97bc0d..4b2aa7d943 100644 --- a/botocore/data/iottwinmaker/2021-11-29/service-2.json +++ b/botocore/data/iottwinmaker/2021-11-29/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2021-11-29", "endpointPrefix":"iottwinmaker", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS IoT TwinMaker", "serviceId":"IoTTwinMaker", "signatureVersion":"v4", @@ -4096,7 +4096,8 @@ "enum":[ "UPDATE", "DELETE", - "CREATE" + "CREATE", + "RESET_VALUE" ] }, "PropertyValue":{ diff --git a/botocore/data/iotwireless/2020-11-22/service-2.json b/botocore/data/iotwireless/2020-11-22/service-2.json index 840f1342f1..43ad9c1702 100644 --- a/botocore/data/iotwireless/2020-11-22/service-2.json +++ b/botocore/data/iotwireless/2020-11-22/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2020-11-22", "endpointPrefix":"api.iotwireless", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS IoT Wireless", "serviceId":"IoT Wireless", "signatureVersion":"v4", "signingName":"iotwireless", - "uid":"iotwireless-2020-11-22" + "uid":"iotwireless-2020-11-22", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAwsAccountWithPartnerAccount":{ @@ -3936,7 +3938,8 @@ "MissingFrag", "MemoryError", "MICError", - "Successful" + "Successful", + "Device_exist_in_conflict_fuota_task" ] }, "FuotaTask":{ @@ -6630,6 +6633,8 @@ "enum":[ "DeviceRSSI", "DeviceSNR", + "DeviceRoamingRSSI", + "DeviceRoamingSNR", "DeviceUplinkCount", "DeviceDownlinkCount", "DeviceUplinkLostCount", diff --git a/botocore/data/ivs-realtime/2020-07-14/paginators-1.json b/botocore/data/ivs-realtime/2020-07-14/paginators-1.json index ea142457a6..c0d2208c7c 100644 --- a/botocore/data/ivs-realtime/2020-07-14/paginators-1.json +++ b/botocore/data/ivs-realtime/2020-07-14/paginators-1.json @@ 
-1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListPublicKeys": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "publicKeys" + } + } } diff --git a/botocore/data/ivs-realtime/2020-07-14/service-2.json b/botocore/data/ivs-realtime/2020-07-14/service-2.json index 66e5b9e9ab..76bbe212f3 100644 --- a/botocore/data/ivs-realtime/2020-07-14/service-2.json +++ b/botocore/data/ivs-realtime/2020-07-14/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2020-07-14", "endpointPrefix":"ivsrealtime", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"ivsrealtime", "serviceFullName":"Amazon Interactive Video Service RealTime", "serviceId":"IVS RealTime", @@ -107,6 +107,24 @@ ], "documentation":"

Deletes an EncoderConfiguration resource. Ensures that no Compositions are using this template; otherwise, returns an error.

" }, + "DeletePublicKey":{ + "name":"DeletePublicKey", + "http":{ + "method":"POST", + "requestUri":"/DeletePublicKey", + "responseCode":200 + }, + "input":{"shape":"DeletePublicKeyRequest"}, + "output":{"shape":"DeletePublicKeyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

Deletes the specified public key used to sign stage participant tokens. This invalidates future participant tokens generated using the key pair’s private key.

" + }, "DeleteStage":{ "name":"DeleteStage", "http":{ @@ -215,6 +233,22 @@ ], "documentation":"

Gets information about the specified participant token.

" }, + "GetPublicKey":{ + "name":"GetPublicKey", + "http":{ + "method":"POST", + "requestUri":"/GetPublicKey", + "responseCode":200 + }, + "input":{"shape":"GetPublicKeyRequest"}, + "output":{"shape":"GetPublicKeyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets information for the specified public key.

" + }, "GetStage":{ "name":"GetStage", "http":{ @@ -266,6 +300,24 @@ ], "documentation":"

Gets the storage configuration for the specified ARN.

" }, + "ImportPublicKey":{ + "name":"ImportPublicKey", + "http":{ + "method":"POST", + "requestUri":"/ImportPublicKey", + "responseCode":200 + }, + "input":{"shape":"ImportPublicKeyRequest"}, + "output":{"shape":"ImportPublicKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

Import a public key to be used for signing stage participant tokens.

" + }, "ListCompositions":{ "name":"ListCompositions", "http":{ @@ -332,6 +384,21 @@ ], "documentation":"

Lists all participants in a specified stage session.

" }, + "ListPublicKeys":{ + "name":"ListPublicKeys", + "http":{ + "method":"POST", + "requestUri":"/ListPublicKeys", + "responseCode":200 + }, + "input":{"shape":"ListPublicKeysRequest"}, + "output":{"shape":"ListPublicKeysResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets summary information about all public keys in your account, in the AWS region where the API request is processed.

" + }, "ListStageSessions":{ "name":"ListStageSessions", "http":{ @@ -509,7 +576,28 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" + }, + "AutoParticipantRecordingConfiguration":{ + "type":"structure", + "required":["storageConfigurationArn"], + "members":{ + "storageConfigurationArn":{ + "shape":"AutoParticipantRecordingStorageConfigurationArn", + "documentation":"

ARN of the StorageConfiguration resource to use for individual participant recording. Default: \"\" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified, when a Stage is created or updated.

" + }, + "mediaTypes":{ + "shape":"ParticipantRecordingMediaTypeList", + "documentation":"

Types of media to be recorded. Default: AUDIO_VIDEO.

" + } + }, + "documentation":"

Object specifying a configuration for individual participant recording.

" + }, + "AutoParticipantRecordingStorageConfigurationArn":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$" }, "Bitrate":{ "type":"integer", @@ -521,7 +609,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+" }, "ChannelDestinationConfiguration":{ "type":"structure", @@ -542,43 +630,43 @@ "type":"structure", "required":[ "arn", - "destinations", - "layout", "stageArn", - "state" + "state", + "layout", + "destinations" ], "members":{ "arn":{ "shape":"CompositionArn", "documentation":"

ARN of the Composition resource.

" }, - "destinations":{ - "shape":"DestinationList", - "documentation":"

Array of Destination objects. A Composition can contain either one destination (channel or s3) or two (one channel and one s3).

" - }, - "endTime":{ - "shape":"Time", - "documentation":"

UTC time of the Composition end. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, - "layout":{ - "shape":"LayoutConfiguration", - "documentation":"

Layout object to configure composition parameters.

" - }, "stageArn":{ "shape":"StageArn", "documentation":"

ARN of the stage used as input

" }, - "startTime":{ - "shape":"Time", - "documentation":"

UTC time of the Composition start. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "state":{ "shape":"CompositionState", "documentation":"

State of the Composition.

" }, + "layout":{ + "shape":"LayoutConfiguration", + "documentation":"

Layout object to configure composition parameters.

" + }, + "destinations":{ + "shape":"DestinationList", + "documentation":"

Array of Destination objects. A Composition can contain either one destination (channel or s3) or two (one channel and one s3).

" + }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

UTC time of the Composition start. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "endTime":{ + "shape":"Time", + "documentation":"

UTC time of the Composition end. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Object specifying a Composition resource.

" @@ -587,13 +675,13 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:composition/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:composition/[a-zA-Z0-9-]+" }, "CompositionClientToken":{ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "CompositionState":{ "type":"string", @@ -609,8 +697,8 @@ "type":"structure", "required":[ "arn", - "destinations", "stageArn", + "destinations", "state" ], "members":{ @@ -618,21 +706,13 @@ "shape":"CompositionArn", "documentation":"

ARN of the Composition resource.

" }, - "destinations":{ - "shape":"DestinationSummaryList", - "documentation":"

Array of Destination objects.

" - }, - "endTime":{ - "shape":"Time", - "documentation":"

UTC time of the Composition end. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "stageArn":{ "shape":"StageArn", "documentation":"

ARN of the attached stage.

" }, - "startTime":{ - "shape":"Time", - "documentation":"

UTC time of the Composition start. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "destinations":{ + "shape":"DestinationSummaryList", + "documentation":"

Array of Destination objects.

" }, "state":{ "shape":"CompositionState", @@ -641,6 +721,14 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

UTC time of the Composition start. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "endTime":{ + "shape":"Time", + "documentation":"

UTC time of the Composition end. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Summary information about a Composition.

" @@ -671,13 +759,13 @@ "shape":"EncoderConfigurationName", "documentation":"

Optional name to identify the resource.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" - }, "video":{ "shape":"Video", "documentation":"

Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" } } }, @@ -694,25 +782,25 @@ "type":"structure", "required":["stageArn"], "members":{ - "attributes":{ - "shape":"ParticipantTokenAttributes", - "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" - }, - "capabilities":{ - "shape":"ParticipantTokenCapabilities", - "documentation":"

Set of capabilities that the user is allowed to perform in the stage. Default: PUBLISH, SUBSCRIBE.

" + "stageArn":{ + "shape":"StageArn", + "documentation":"

ARN of the stage to which this token is scoped.

" }, "duration":{ "shape":"ParticipantTokenDurationMinutes", "documentation":"

Duration (in minutes), after which the token expires. Default: 720 (12 hours).

" }, - "stageArn":{ - "shape":"StageArn", - "documentation":"

ARN of the stage to which this token is scoped.

" - }, "userId":{ "shape":"ParticipantTokenUserId", "documentation":"

Name that can be specified to help identify the token. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "attributes":{ + "shape":"ParticipantTokenAttributes", + "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "capabilities":{ + "shape":"ParticipantTokenCapabilities", + "documentation":"

Set of capabilities that the user is allowed to perform in the stage. Default: PUBLISH, SUBSCRIBE.

" } } }, @@ -739,19 +827,23 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + }, + "autoParticipantRecordingConfiguration":{ + "shape":"AutoParticipantRecordingConfiguration", + "documentation":"

Configuration object for individual participant recording, to attach to the new stage.

" } } }, "CreateStageResponse":{ "type":"structure", "members":{ - "participantTokens":{ - "shape":"ParticipantTokenList", - "documentation":"

Participant tokens attached to the stage. These correspond to the participants in the request.

" - }, "stage":{ "shape":"Stage", "documentation":"

The stage that was created.

" + }, + "participantTokens":{ + "shape":"ParticipantTokenList", + "documentation":"

Participant tokens attached to the stage. These correspond to the participants in the request.

" } } }, @@ -797,6 +889,21 @@ "members":{ } }, + "DeletePublicKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PublicKeyArn", + "documentation":"

ARN of the public key to be deleted.

" + } + } + }, + "DeletePublicKeyResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteStageRequest":{ "type":"structure", "required":["arn"], @@ -830,34 +937,34 @@ "Destination":{ "type":"structure", "required":[ - "configuration", "id", - "state" + "state", + "configuration" ], "members":{ - "configuration":{ - "shape":"DestinationConfiguration", - "documentation":"

Configuration used to create this destination.

" - }, - "detail":{ - "shape":"DestinationDetail", - "documentation":"

Optional details regarding the status of the destination.

" - }, - "endTime":{ - "shape":"Time", - "documentation":"

UTC time of the destination end. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"String", "documentation":"

Unique identifier for this destination, assigned by IVS.

" }, + "state":{ + "shape":"DestinationState", + "documentation":"

State of the Composition Destination.

" + }, "startTime":{ "shape":"Time", "documentation":"

UTC time of the destination start. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "state":{ - "shape":"DestinationState", - "documentation":"

State of the Composition Destination.

" + "endTime":{ + "shape":"Time", + "documentation":"

UTC time of the destination end. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "configuration":{ + "shape":"DestinationConfiguration", + "documentation":"

Configuration used to create this destination.

" + }, + "detail":{ + "shape":"DestinationDetail", + "documentation":"

Optional details regarding the status of the destination.

" } }, "documentation":"

Object specifying the status of a Destination.

" @@ -865,14 +972,14 @@ "DestinationConfiguration":{ "type":"structure", "members":{ - "channel":{ - "shape":"ChannelDestinationConfiguration", - "documentation":"

An IVS channel to be used for broadcasting, for server-side composition. Either a channel or an s3 must be specified.

" - }, "name":{ "shape":"DestinationConfigurationName", "documentation":"

Name that can be specified to help identify the destination.

" }, + "channel":{ + "shape":"ChannelDestinationConfiguration", + "documentation":"

An IVS channel to be used for broadcasting, for server-side composition. Either a channel or an s3 must be specified.

" + }, "s3":{ "shape":"S3DestinationConfiguration", "documentation":"

An S3 storage configuration to be used for recording video data. Either a channel or an s3 must be specified.

" @@ -890,7 +997,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "DestinationDetail":{ "type":"structure", @@ -926,21 +1033,21 @@ "state" ], "members":{ - "endTime":{ - "shape":"Time", - "documentation":"

UTC time of the destination end. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"String", "documentation":"

Unique identifier for this destination, assigned by IVS.

" }, + "state":{ + "shape":"DestinationState", + "documentation":"

State of the Composition Destination.

" + }, "startTime":{ "shape":"Time", "documentation":"

UTC time of the destination start. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "state":{ - "shape":"DestinationState", - "documentation":"

State of the Composition Destination.

" + "endTime":{ + "shape":"Time", + "documentation":"

UTC time of the destination end. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Summary information about a Destination.

" @@ -959,10 +1066,14 @@ "DisconnectParticipantRequest":{ "type":"structure", "required":[ - "participantId", - "stageArn" + "stageArn", + "participantId" ], "members":{ + "stageArn":{ + "shape":"StageArn", + "documentation":"

ARN of the stage to which the participant is attached.

" + }, "participantId":{ "shape":"ParticipantTokenId", "documentation":"

Identifier of the participant to be disconnected. This is assigned by IVS and returned by CreateParticipantToken.

" @@ -970,10 +1081,6 @@ "reason":{ "shape":"DisconnectParticipantReason", "documentation":"

Description of why this participant is being disconnected.

" - }, - "stageArn":{ - "shape":"StageArn", - "documentation":"

ARN of the stage to which the participant is attached.

" } } }, @@ -994,13 +1101,13 @@ "shape":"EncoderConfigurationName", "documentation":"

Optional name to identify the resource.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" - }, "video":{ "shape":"Video", "documentation":"

Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" } }, "documentation":"

Settings for transcoding.

" @@ -1009,7 +1116,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:encoder-configuration/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:encoder-configuration/[a-zA-Z0-9-]+" }, "EncoderConfigurationArnList":{ "type":"list", @@ -1021,7 +1128,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "EncoderConfigurationSummary":{ "type":"structure", @@ -1049,14 +1156,6 @@ "Event":{ "type":"structure", "members":{ - "errorCode":{ - "shape":"EventErrorCode", - "documentation":"

If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher that doesn’t exist.

" - }, - "eventTime":{ - "shape":"Time", - "documentation":"

ISO 8601 timestamp (returned as a string) for when the event occurred.

" - }, "name":{ "shape":"EventName", "documentation":"

The name of the event.

" @@ -1065,9 +1164,17 @@ "shape":"ParticipantId", "documentation":"

Unique identifier for the participant who triggered the event. This is assigned by IVS.

" }, + "eventTime":{ + "shape":"Time", + "documentation":"

ISO 8601 timestamp (returned as a string) for when the event occurred.

" + }, "remoteParticipantId":{ "shape":"ParticipantId", "documentation":"

Unique identifier for the remote participant. For a subscribe event, this is the publisher. For a publish or join event, this is null. This is assigned by IVS.

" + }, + "errorCode":{ + "shape":"EventErrorCode", + "documentation":"

If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher that doesn’t exist.

" } }, "documentation":"

An occurrence during a stage session.

" @@ -1145,22 +1252,22 @@ "GetParticipantRequest":{ "type":"structure", "required":[ - "participantId", + "stageArn", "sessionId", - "stageArn" + "participantId" ], "members":{ - "participantId":{ - "shape":"ParticipantId", - "documentation":"

Unique identifier for the participant. This is assigned by IVS and returned by CreateParticipantToken.

" - }, - "sessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of a session within the stage.

" - }, "stageArn":{ "shape":"StageArn", "documentation":"

Stage ARN.

" + }, + "sessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of a session within the stage.

" + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

Unique identifier for the participant. This is assigned by IVS and returned by CreateParticipantToken.

" } } }, @@ -1173,6 +1280,25 @@ } } }, + "GetPublicKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PublicKeyArn", + "documentation":"

ARN of the public key for which the information is to be retrieved.

" + } + } + }, + "GetPublicKeyResponse":{ + "type":"structure", + "members":{ + "publicKey":{ + "shape":"PublicKey", + "documentation":"

The public key that is returned.

" + } + } + }, "GetStageRequest":{ "type":"structure", "required":["arn"], @@ -1195,17 +1321,17 @@ "GetStageSessionRequest":{ "type":"structure", "required":[ - "sessionId", - "stageArn" + "stageArn", + "sessionId" ], "members":{ - "sessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of a session within the stage.

" - }, "stageArn":{ "shape":"StageArn", "documentation":"

ARN of the stage for which the information is to be retrieved.

" + }, + "sessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of a session within the stage.

" } } }, @@ -1242,11 +1368,7 @@ "members":{ "featuredParticipantAttribute":{ "shape":"AttributeKey", - "documentation":"

This attribute name identifies the featured slot. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot.

" - }, - "gridGap":{ - "shape":"GridGap", - "documentation":"

Specifies the spacing between participant tiles in pixels. Default: 2.

" + "documentation":"

This attribute name identifies the featured slot. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. Default: \"\" (no featured participant).

" }, "omitStoppedVideo":{ "shape":"OmitStoppedVideo", @@ -1254,11 +1376,15 @@ }, "videoAspectRatio":{ "shape":"VideoAspectRatio", - "documentation":"

Sets the non-featured participant display mode. Default: VIDEO.

" + "documentation":"

Sets the non-featured participant display mode, to control the aspect ratio of video tiles. VIDEO is 16:9, SQUARE is 1:1, and PORTRAIT is 3:4. Default: VIDEO.

" }, "videoFillMode":{ "shape":"VideoFillMode", - "documentation":"

Defines how video fits within the participant tile. When not set, videoFillMode defaults to COVER fill mode for participants in the grid and to CONTAIN fill mode for featured participants.

" + "documentation":"

Defines how video content fits within the participant tile: FILL (stretched), COVER (cropped), or CONTAIN (letterboxed). When not set, videoFillMode defaults to COVER fill mode for participants in the grid and to CONTAIN fill mode for featured participants.

" + }, + "gridGap":{ + "shape":"GridGap", + "documentation":"

Specifies the spacing between participant tiles in pixels. Default: 2.

" } }, "documentation":"

Configuration information specific to Grid layout, for server-side composition. See \"Layouts\" in Server-Side Composition.

" @@ -1273,6 +1399,33 @@ "max":1920, "min":1 }, + "ImportPublicKeyRequest":{ + "type":"structure", + "required":["publicKeyMaterial"], + "members":{ + "publicKeyMaterial":{ + "shape":"PublicKeyMaterial", + "documentation":"

The content of the public key to be imported.

" + }, + "name":{ + "shape":"PublicKeyName", + "documentation":"

Name of the public key to be imported.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + } + } + }, + "ImportPublicKeyResponse":{ + "type":"structure", + "members":{ + "publicKey":{ + "shape":"PublicKey", + "documentation":"

The public key that was imported.

" + } + } + }, "InternalServerException":{ "type":"structure", "members":{ @@ -1303,21 +1456,21 @@ "ListCompositionsRequest":{ "type":"structure", "members":{ - "filterByEncoderConfigurationArn":{ - "shape":"EncoderConfigurationArn", - "documentation":"

Filters the Composition list to match the specified EncoderConfiguration attached to at least one of its output.

" - }, "filterByStageArn":{ "shape":"StageArn", "documentation":"

Filters the Composition list to match the specified Stage ARN.

" }, - "maxResults":{ - "shape":"MaxCompositionResults", - "documentation":"

Maximum number of results to return. Default: 100.

" + "filterByEncoderConfigurationArn":{ + "shape":"EncoderConfigurationArn", + "documentation":"

Filters the Composition list to match the specified EncoderConfiguration attached to at least one of its output.

" }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first Composition to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxCompositionResults", + "documentation":"

Maximum number of results to return. Default: 100.

" } } }, @@ -1338,13 +1491,13 @@ "ListEncoderConfigurationsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxEncoderConfigurationResults", - "documentation":"

Maximum number of results to return. Default: 100.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first encoder configuration to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxEncoderConfigurationResults", + "documentation":"

Maximum number of results to return. Default: 100.

" } } }, @@ -1365,30 +1518,30 @@ "ListParticipantEventsRequest":{ "type":"structure", "required":[ - "participantId", + "stageArn", "sessionId", - "stageArn" + "participantId" ], "members":{ - "maxResults":{ - "shape":"MaxParticipantEventResults", - "documentation":"

Maximum number of results to return. Default: 50.

" + "stageArn":{ + "shape":"StageArn", + "documentation":"

Stage ARN.

" }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

The first participant event to retrieve. This is used for pagination; see the nextToken response field.

" + "sessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of a session within the stage.

" }, "participantId":{ "shape":"ParticipantId", "documentation":"

Unique identifier for this participant. This is assigned by IVS and returned by CreateParticipantToken.

" }, - "sessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of a session within the stage.

" + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first participant event to retrieve. This is used for pagination; see the nextToken response field.

" }, - "stageArn":{ - "shape":"StageArn", - "documentation":"

Stage ARN.

" + "maxResults":{ + "shape":"MaxParticipantEventResults", + "documentation":"

Maximum number of results to return. Default: 50.

" } } }, @@ -1409,37 +1562,41 @@ "ListParticipantsRequest":{ "type":"structure", "required":[ - "sessionId", - "stageArn" + "stageArn", + "sessionId" ], "members":{ - "filterByPublished":{ - "shape":"Published", - "documentation":"

Filters the response list to only show participants who published during the stage session. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request.

" + "stageArn":{ + "shape":"StageArn", + "documentation":"

Stage ARN.

" }, - "filterByState":{ - "shape":"ParticipantState", - "documentation":"

Filters the response list to only show participants in the specified state. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request.

" + "sessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of the session within the stage.

" }, "filterByUserId":{ "shape":"UserId", - "documentation":"

Filters the response list to match the specified user ID. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request. A userId is a customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems.

" + "documentation":"

Filters the response list to match the specified user ID. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request. A userId is a customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems.

" }, - "maxResults":{ - "shape":"MaxParticipantResults", - "documentation":"

Maximum number of results to return. Default: 50.

" + "filterByPublished":{ + "shape":"Published", + "documentation":"

Filters the response list to only show participants who published during the stage session. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request.

" + }, + "filterByState":{ + "shape":"ParticipantState", + "documentation":"

Filters the response list to only show participants in the specified state. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request.

" }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first participant to retrieve. This is used for pagination; see the nextToken response field.

" }, - "sessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of the session within the stage.

" + "maxResults":{ + "shape":"MaxParticipantResults", + "documentation":"

Maximum number of results to return. Default: 50.

" }, - "stageArn":{ - "shape":"StageArn", - "documentation":"

Stage ARN.

" + "filterByRecordingState":{ + "shape":"ParticipantRecordingFilterByRecordingState", + "documentation":"

Filters the response list to only show participants with the specified recording state. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request.

" } } }, @@ -1447,31 +1604,58 @@ "type":"structure", "required":["participants"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more participants than maxResults, use nextToken in the request to get the next set.

" - }, "participants":{ "shape":"ParticipantList", "documentation":"

List of the matching participants (summary information only).

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more participants than maxResults, use nextToken in the request to get the next set.

" } } }, - "ListStageSessionsRequest":{ + "ListPublicKeysRequest":{ "type":"structure", - "required":["stageArn"], "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first public key to retrieve. This is used for pagination; see the nextToken response field.

" + }, "maxResults":{ - "shape":"MaxStageSessionResults", + "shape":"MaxPublicKeyResults", "documentation":"

Maximum number of results to return. Default: 50.

" + } + } + }, + "ListPublicKeysResponse":{ + "type":"structure", + "required":["publicKeys"], + "members":{ + "publicKeys":{ + "shape":"PublicKeyList", + "documentation":"

List of the matching public keys (summary information only).

" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The first stage session to retrieve. This is used for pagination; see the nextToken response field.

" - }, + "documentation":"

If there are more public keys than maxResults, use nextToken in the request to get the next set.

" + } + } + }, + "ListStageSessionsRequest":{ + "type":"structure", + "required":["stageArn"], + "members":{ "stageArn":{ "shape":"StageArn", "documentation":"

Stage ARN.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first stage session to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStageSessionResults", + "documentation":"

Maximum number of results to return. Default: 50.

" } } }, @@ -1479,26 +1663,26 @@ "type":"structure", "required":["stageSessions"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more stage sessions than maxResults, use nextToken in the request to get the next set.

" - }, "stageSessions":{ "shape":"StageSessionList", "documentation":"

List of matching stage sessions.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more stage sessions than maxResults, use nextToken in the request to get the next set.

" } } }, "ListStagesRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxStageResults", - "documentation":"

Maximum number of results to return. Default: 50.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first stage to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStageResults", + "documentation":"

Maximum number of results to return. Default: 50.

" } } }, @@ -1506,26 +1690,26 @@ "type":"structure", "required":["stages"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more stages than maxResults, use nextToken in the request to get the next set.

" - }, "stages":{ "shape":"StageSummaryList", "documentation":"

List of the matching stages (summary information only).

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more stages than maxResults, use nextToken in the request to get the next set.

" } } }, "ListStorageConfigurationsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxStorageConfigurationResults", - "documentation":"

Maximum number of storage configurations to return. Default: your service quota or 100, whichever is smaller.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first storage configuration to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStorageConfigurationResults", + "documentation":"

Maximum number of storage configurations to return. Default: your service quota or 100, whichever is smaller.

" } } }, @@ -1533,13 +1717,13 @@ "type":"structure", "required":["storageConfigurations"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more storage configurations than maxResults, use nextToken in the request to get the next set.

" - }, "storageConfigurations":{ "shape":"StorageConfigurationSummaryList", "documentation":"

List of the matching storage configurations.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more storage configurations than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -1589,6 +1773,12 @@ "max":100, "min":1 }, + "MaxPublicKeyResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "MaxStageResults":{ "type":"integer", "box":true, @@ -1612,27 +1802,35 @@ "type":"string", "max":1024, "min":0, - "pattern":"^[a-zA-Z0-9+/=_-]*$" + "pattern":"[a-zA-Z0-9+/=_-]*" }, "Participant":{ "type":"structure", "members":{ - "attributes":{ - "shape":"ParticipantAttributes", - "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + "participantId":{ + "shape":"ParticipantId", + "documentation":"

Unique identifier for this participant, assigned by IVS.

" }, - "browserName":{ - "shape":"ParticipantClientAttribute", - "documentation":"

The participant’s browser.

" + "userId":{ + "shape":"UserId", + "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" }, - "browserVersion":{ - "shape":"ParticipantClientAttribute", - "documentation":"

The participant’s browser version.

" + "state":{ + "shape":"ParticipantState", + "documentation":"

Whether the participant is connected to or disconnected from the stage.

" }, "firstJoinTime":{ "shape":"Time", "documentation":"

ISO 8601 timestamp (returned as a string) when the participant first joined the stage session.

" }, + "attributes":{ + "shape":"ParticipantAttributes", + "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "published":{ + "shape":"Published", + "documentation":"

Whether the participant ever published to the stage session.

" + }, "ispName":{ "shape":"ParticipantClientAttribute", "documentation":"

The participant’s Internet Service Provider.

" @@ -1645,25 +1843,29 @@ "shape":"ParticipantClientAttribute", "documentation":"

The participant’s operating system version.

" }, - "participantId":{ - "shape":"ParticipantId", - "documentation":"

Unique identifier for this participant, assigned by IVS.

" + "browserName":{ + "shape":"ParticipantClientAttribute", + "documentation":"

The participant’s browser.

" }, - "published":{ - "shape":"Published", - "documentation":"

Whether the participant ever published to the stage session.

" + "browserVersion":{ + "shape":"ParticipantClientAttribute", + "documentation":"

The participant’s browser version.

" }, "sdkVersion":{ "shape":"ParticipantClientAttribute", "documentation":"

The participant’s SDK version.

" }, - "state":{ - "shape":"ParticipantState", - "documentation":"

Whether the participant is connected to or disconnected from the stage.

" + "recordingS3BucketName":{ + "shape":"ParticipantRecordingS3BucketName", + "documentation":"

Name of the S3 bucket to where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled.

" }, - "userId":{ - "shape":"UserId", - "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + "recordingS3Prefix":{ + "shape":"ParticipantRecordingS3Prefix", + "documentation":"

S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled.

" + }, + "recordingState":{ + "shape":"ParticipantRecordingState", + "documentation":"

The participant’s recording state.

" } }, "documentation":"

Object describing a participant that has joined a stage.

" @@ -1677,18 +1879,64 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_.,:;\\s]*$" + "pattern":"[a-zA-Z0-9-_.,:;\\s]*" }, "ParticipantId":{ "type":"string", "max":64, "min":0, - "pattern":"^[a-zA-Z0-9-]*$" + "pattern":"[a-zA-Z0-9-]*" }, "ParticipantList":{ "type":"list", "member":{"shape":"ParticipantSummary"} }, + "ParticipantRecordingFilterByRecordingState":{ + "type":"string", + "enum":[ + "STARTING", + "ACTIVE", + "STOPPING", + "STOPPED", + "FAILED" + ] + }, + "ParticipantRecordingMediaType":{ + "type":"string", + "enum":[ + "AUDIO_VIDEO", + "AUDIO_ONLY" + ] + }, + "ParticipantRecordingMediaTypeList":{ + "type":"list", + "member":{"shape":"ParticipantRecordingMediaType"}, + "max":1, + "min":0 + }, + "ParticipantRecordingS3BucketName":{ + "type":"string", + "max":63, + "min":0, + "pattern":"[a-z0-9-.]*" + }, + "ParticipantRecordingS3Prefix":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[a-zA-Z0-9-]*" + }, + "ParticipantRecordingState":{ + "type":"string", + "enum":[ + "STARTING", + "ACTIVE", + "STOPPING", + "STOPPED", + "FAILED", + "DISABLED" + ] + }, "ParticipantState":{ "type":"string", "enum":[ @@ -1699,25 +1947,29 @@ "ParticipantSummary":{ "type":"structure", "members":{ - "firstJoinTime":{ - "shape":"Time", - "documentation":"

ISO 8601 timestamp (returned as a string) when the participant first joined the stage session.

" - }, "participantId":{ "shape":"ParticipantId", "documentation":"

Unique identifier for this participant, assigned by IVS.

" }, - "published":{ - "shape":"Published", - "documentation":"

Whether the participant ever published to the stage session.

" + "userId":{ + "shape":"UserId", + "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" }, "state":{ "shape":"ParticipantState", "documentation":"

Whether the participant is connected to or disconnected from the stage.

" }, - "userId":{ - "shape":"UserId", - "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + "firstJoinTime":{ + "shape":"Time", + "documentation":"

ISO 8601 timestamp (returned as a string) when the participant first joined the stage session.

" + }, + "published":{ + "shape":"Published", + "documentation":"

Whether the participant ever published to the stage session.

" + }, + "recordingState":{ + "shape":"ParticipantRecordingState", + "documentation":"

The participant’s recording state.

" } }, "documentation":"

Summary object describing a participant that has joined a stage.

" @@ -1725,22 +1977,6 @@ "ParticipantToken":{ "type":"structure", "members":{ - "attributes":{ - "shape":"ParticipantTokenAttributes", - "documentation":"

Application-provided attributes to encode into the token and attach to a stage. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" - }, - "capabilities":{ - "shape":"ParticipantTokenCapabilities", - "documentation":"

Set of capabilities that the user is allowed to perform in the stage.

" - }, - "duration":{ - "shape":"ParticipantTokenDurationMinutes", - "documentation":"

Duration (in minutes), after which the participant token expires. Default: 720 (12 hours).

" - }, - "expirationTime":{ - "shape":"ParticipantTokenExpirationTime", - "documentation":"

ISO 8601 timestamp (returned as a string) for when this token expires.

" - }, "participantId":{ "shape":"ParticipantTokenId", "documentation":"

Unique identifier for this participant token, assigned by IVS.

" @@ -1752,6 +1988,22 @@ "userId":{ "shape":"ParticipantTokenUserId", "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "attributes":{ + "shape":"ParticipantTokenAttributes", + "documentation":"

Application-provided attributes to encode into the token and attach to a stage. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "duration":{ + "shape":"ParticipantTokenDurationMinutes", + "documentation":"

Duration (in minutes), after which the participant token expires. Default: 720 (12 hours).

" + }, + "capabilities":{ + "shape":"ParticipantTokenCapabilities", + "documentation":"

Set of capabilities that the user is allowed to perform in the stage.

" + }, + "expirationTime":{ + "shape":"ParticipantTokenExpirationTime", + "documentation":"

ISO 8601 timestamp (returned as a string) for when this token expires.

" } }, "documentation":"

Object specifying a participant token in a stage.

Important: Treat tokens as opaque; i.e., do not build functionality based on token contents. The format of tokens could change in the future.

" @@ -1777,14 +2029,6 @@ "ParticipantTokenConfiguration":{ "type":"structure", "members":{ - "attributes":{ - "shape":"ParticipantTokenAttributes", - "documentation":"

Application-provided attributes to encode into the corresponding participant token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" - }, - "capabilities":{ - "shape":"ParticipantTokenCapabilities", - "documentation":"

Set of capabilities that the user is allowed to perform in the stage.

" - }, "duration":{ "shape":"ParticipantTokenDurationMinutes", "documentation":"

Duration (in minutes), after which the corresponding participant token expires. Default: 720 (12 hours).

" @@ -1792,6 +2036,14 @@ "userId":{ "shape":"ParticipantTokenUserId", "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "attributes":{ + "shape":"ParticipantTokenAttributes", + "documentation":"

Application-provided attributes to encode into the corresponding participant token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "capabilities":{ + "shape":"ParticipantTokenCapabilities", + "documentation":"

Set of capabilities that the user is allowed to perform in the stage.

" } }, "documentation":"

Object specifying a participant token configuration in a stage.

" @@ -1853,32 +2105,32 @@ "members":{ "featuredParticipantAttribute":{ "shape":"AttributeKey", - "documentation":"

This attribute name identifies the featured slot. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot.

" + "documentation":"

This attribute name identifies the featured slot. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. Default: \"\" (no featured participant).

" + }, + "omitStoppedVideo":{ + "shape":"OmitStoppedVideo", + "documentation":"

Determines whether to omit participants with stopped video in the composition. Default: false.

" + }, + "videoFillMode":{ + "shape":"VideoFillMode", + "documentation":"

Defines how video content fits within the participant tile: FILL (stretched), COVER (cropped), or CONTAIN (letterboxed). Default: COVER.

" }, "gridGap":{ "shape":"GridGap", "documentation":"

Specifies the spacing between participant tiles in pixels. Default: 0.

" }, - "omitStoppedVideo":{ - "shape":"OmitStoppedVideo", - "documentation":"

Determines whether to omit participants with stopped video in the composition. Default: false.

" + "pipParticipantAttribute":{ + "shape":"AttributeKey", + "documentation":"

Specifies the participant for the PiP window. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the PiP slot. Default: \"\" (no PiP participant).

" }, "pipBehavior":{ "shape":"PipBehavior", - "documentation":"

Defines PiP behavior when all participants have left. Default: STATIC.

" - }, - "pipHeight":{ - "shape":"PipHeight", - "documentation":"

Specifies the height of the PiP window in pixels. When this is not set explicitly, pipHeight’s value will be based on the size of the composition and the aspect ratio of the participant’s video.

" + "documentation":"

Defines PiP behavior when all participants have left: STATIC (maintains original position/size) or DYNAMIC (expands to full composition). Default: STATIC.

" }, "pipOffset":{ "shape":"PipOffset", "documentation":"

Sets the PiP window’s offset position in pixels from the closest edges determined by PipPosition. Default: 0.

" }, - "pipParticipantAttribute":{ - "shape":"AttributeKey", - "documentation":"

Identifies the PiP slot. A participant with this attribute set to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the PiP slot.

" - }, "pipPosition":{ "shape":"PipPosition", "documentation":"

Determines the corner position of the PiP window. Default: BOTTOM_RIGHT.

" @@ -1887,9 +2139,9 @@ "shape":"PipWidth", "documentation":"

Specifies the width of the PiP window in pixels. When this is not set explicitly, pipWidth’s value will be based on the size of the composition and the aspect ratio of the participant’s video.

" }, - "videoFillMode":{ - "shape":"VideoFillMode", - "documentation":"

Defines how video fits within the participant tile. Default: COVER.

" + "pipHeight":{ + "shape":"PipHeight", + "documentation":"

Specifies the height of the PiP window in pixels. When this is not set explicitly, pipHeight’s value will be based on the size of the composition and the aspect ratio of the participant’s video.

" } }, "documentation":"

Configuration information specific to Picture-in-Picture (PiP) layout, for server-side composition.

" @@ -1917,6 +2169,71 @@ "box":true, "min":1 }, + "PublicKey":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"PublicKeyArn", + "documentation":"

Public key ARN.

" + }, + "name":{ + "shape":"PublicKeyName", + "documentation":"

Public key name.

" + }, + "publicKeyMaterial":{ + "shape":"PublicKeyMaterial", + "documentation":"

Public key material.

" + }, + "fingerprint":{ + "shape":"PublicKeyFingerprint", + "documentation":"

The public key fingerprint, a short string used to identify or verify the full public key.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + } + }, + "documentation":"

Object specifying a public key used to sign stage participant tokens.

" + }, + "PublicKeyArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:public-key/[a-zA-Z0-9-]+" + }, + "PublicKeyFingerprint":{"type":"string"}, + "PublicKeyList":{ + "type":"list", + "member":{"shape":"PublicKeySummary"} + }, + "PublicKeyMaterial":{ + "type":"string", + "pattern":".*-----BEGIN PUBLIC KEY-----\\r?\\n([a-zA-Z0-9+/=\\r\\n]+)\\r?\\n-----END PUBLIC KEY-----(\\r?\\n)?.*" + }, + "PublicKeyName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"[a-zA-Z0-9-_]*" + }, + "PublicKeySummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"PublicKeyArn", + "documentation":"

Public key ARN.

" + }, + "name":{ + "shape":"PublicKeyName", + "documentation":"

Public key name.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + } + }, + "documentation":"

Summary information about a public key.

" + }, "Published":{"type":"boolean"}, "RecordingConfiguration":{ "type":"structure", @@ -1936,7 +2253,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+" }, "ResourceNotFoundException":{ "type":"structure", @@ -1957,15 +2274,19 @@ "type":"string", "max":63, "min":3, - "pattern":"^[a-z0-9-.]+$" + "pattern":"[a-z0-9-.]+" }, "S3DestinationConfiguration":{ "type":"structure", "required":[ - "encoderConfigurationArns", - "storageConfigurationArn" + "storageConfigurationArn", + "encoderConfigurationArns" ], "members":{ + "storageConfigurationArn":{ + "shape":"StorageConfigurationArn", + "documentation":"

ARN of the StorageConfiguration where recorded videos will be stored.

" + }, "encoderConfigurationArns":{ "shape":"EncoderConfigurationArnList", "documentation":"

ARNs of the EncoderConfiguration resource. The encoder configuration and stage resources must be in the same AWS account and region.

" @@ -1973,10 +2294,6 @@ "recordingConfiguration":{ "shape":"RecordingConfiguration", "documentation":"

Array of maps, each of the form string:string (key:value). This is an optional customer specification, currently used only to specify the recording format for storing a recording in Amazon S3.

" - }, - "storageConfigurationArn":{ - "shape":"StorageConfigurationArn", - "documentation":"

ARN of the StorageConfiguration where recorded videos will be stored.

" } }, "documentation":"

A complex type that describes an S3 location where recorded videos will be stored.

" @@ -2022,10 +2339,6 @@ "type":"structure", "required":["arn"], "members":{ - "activeSessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of the active session within the stage.

" - }, "arn":{ "shape":"StageArn", "documentation":"

Stage ARN.

" @@ -2034,9 +2347,21 @@ "shape":"StageName", "documentation":"

Stage name.

" }, + "activeSessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of the active session within the stage.

" + }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" + }, + "autoParticipantRecordingConfiguration":{ + "shape":"AutoParticipantRecordingConfiguration", + "documentation":"

Configuration object for individual participant recording, attached to the stage.

" + }, + "endpoints":{ + "shape":"StageEndpoints", + "documentation":"

Summary information about various endpoints for a stage.

" } }, "documentation":"

Object specifying a stage.

" @@ -2045,21 +2370,36 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:stage/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:stage/[a-zA-Z0-9-]+" + }, + "StageEndpoint":{ + "type":"string", + "max":2048, + "min":0 + }, + "StageEndpoints":{ + "type":"structure", + "members":{ + "events":{ + "shape":"StageEndpoint", + "documentation":"

Events endpoint.

" + }, + "whip":{ + "shape":"StageEndpoint", + "documentation":"

WHIP endpoint.

" + } + }, + "documentation":"

Summary information about various endpoints for a stage.

" }, "StageName":{ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "StageSession":{ "type":"structure", "members":{ - "endTime":{ - "shape":"Time", - "documentation":"

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

" - }, "sessionId":{ "shape":"StageSessionId", "documentation":"

ID of the session within the stage.

" @@ -2067,6 +2407,10 @@ "startTime":{ "shape":"Time", "documentation":"

ISO 8601 timestamp (returned as a string) when this stage session began.

" + }, + "endTime":{ + "shape":"Time", + "documentation":"

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

" } }, "documentation":"

A stage session begins when the first participant joins a stage and ends after the last participant leaves the stage. A stage session helps with debugging stages by grouping events and participants into shorter periods of time (i.e., a session), which is helpful when stages are used over long periods of time.

" @@ -2075,7 +2419,7 @@ "type":"string", "max":16, "min":16, - "pattern":"^st-[a-zA-Z0-9]+$" + "pattern":"st-[a-zA-Z0-9]+" }, "StageSessionList":{ "type":"list", @@ -2084,10 +2428,6 @@ "StageSessionSummary":{ "type":"structure", "members":{ - "endTime":{ - "shape":"Time", - "documentation":"

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

" - }, "sessionId":{ "shape":"StageSessionId", "documentation":"

ID of the session within the stage.

" @@ -2095,6 +2435,10 @@ "startTime":{ "shape":"Time", "documentation":"

ISO 8601 timestamp (returned as a string) when this stage session began.

" + }, + "endTime":{ + "shape":"Time", + "documentation":"

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

" } }, "documentation":"

Summary information about a stage session.

" @@ -2103,10 +2447,6 @@ "type":"structure", "required":["arn"], "members":{ - "activeSessionId":{ - "shape":"StageSessionId", - "documentation":"

ID of the active session within the stage.

" - }, "arn":{ "shape":"StageArn", "documentation":"

Stage ARN.

" @@ -2115,6 +2455,10 @@ "shape":"StageName", "documentation":"

Stage name.

" }, + "activeSessionId":{ + "shape":"StageSessionId", + "documentation":"

ID of the active session within the stage.

" + }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented there.

" @@ -2129,13 +2473,13 @@ "StartCompositionRequest":{ "type":"structure", "required":[ - "destinations", - "stageArn" + "stageArn", + "destinations" ], "members":{ - "destinations":{ - "shape":"DestinationConfigurationList", - "documentation":"

Array of destination configuration.

" + "stageArn":{ + "shape":"StageArn", + "documentation":"

ARN of the stage to be used for compositing.

" }, "idempotencyToken":{ "shape":"CompositionClientToken", @@ -2146,9 +2490,9 @@ "shape":"LayoutConfiguration", "documentation":"

Layout object to configure composition parameters.

" }, - "stageArn":{ - "shape":"StageArn", - "documentation":"

ARN of the stage to be used for compositing.

" + "destinations":{ + "shape":"DestinationConfigurationList", + "documentation":"

Array of destination configuration.

" }, "tags":{ "shape":"Tags", @@ -2207,13 +2551,13 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+" }, "StorageConfigurationName":{ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "StorageConfigurationSummary":{ "type":"structure", @@ -2331,6 +2675,10 @@ "name":{ "shape":"StageName", "documentation":"

Name of the stage to be updated.

" + }, + "autoParticipantRecordingConfiguration":{ + "shape":"AutoParticipantRecordingConfiguration", + "documentation":"

Configuration object for individual participant recording, to attach to the stage. Note that this cannot be updated while recording is active.

" } } }, @@ -2366,21 +2714,21 @@ "Video":{ "type":"structure", "members":{ - "bitrate":{ - "shape":"Bitrate", - "documentation":"

Bitrate for generated output, in bps. Default: 2500000.

" - }, - "framerate":{ - "shape":"Framerate", - "documentation":"

Video frame rate, in fps. Default: 30.

" + "width":{ + "shape":"Width", + "documentation":"

Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.

" }, "height":{ "shape":"Height", "documentation":"

Video-resolution height. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.

" }, - "width":{ - "shape":"Width", - "documentation":"

Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.

" + "framerate":{ + "shape":"Framerate", + "documentation":"

Video frame rate, in fps. Default: 30.

" + }, + "bitrate":{ + "shape":"Bitrate", + "documentation":"

Bitrate for generated output, in bps. Default: 2500000.

" } }, "documentation":"

Settings for video.

" @@ -2410,5 +2758,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Terminology:

  • A stage is a virtual space where participants can exchange video in real time.

  • A participant token is a token that authenticates a participant when they join a stage.

  • A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

  • Server-side composition: The composition process composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process.

  • Server-side composition: A composition controls the look of the outputs, including how participants are positioned in the video.

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming):

  • Stage — A stage is a virtual space where participants can exchange video in real time.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

Stages Endpoints

  • CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire.

  • CreateStage — Creates a new stage (and optionally participant tokens).

  • DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants).

  • DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage.

  • GetParticipant — Gets information about the specified participant token.

  • GetStage — Gets information for the specified stage.

  • GetStageSession — Gets information for the specified stage session.

  • ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session.

  • ListParticipants — Lists all participants in a specified stage session.

  • ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed.

  • ListStageSessions — Gets all sessions for a specified stage.

  • UpdateStage — Updates a stage’s configuration.

Composition Endpoints

  • GetComposition — Gets information about the specified Composition resource.

  • ListCompositions — Gets summary information about all Compositions in your account, in the AWS region where the API request is processed.

  • StartComposition — Starts a Composition from a stage based on the configuration provided in the request.

  • StopComposition — Stops and deletes a Composition resource. Any broadcast from the Composition resource is stopped.

EncoderConfiguration Endpoints

StorageConfiguration Endpoints

Tags Endpoints

  • ListTagsForResource — Gets information about AWS tags for the specified ARN.

  • TagResource — Adds or updates tags for the AWS resource with the specified ARN.

  • UntagResource — Removes tags from the resource with the specified ARN.

" + "documentation":"

The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Key Concepts

  • Stage — A virtual space where participants can exchange video in real time.

  • Participant token — A token that authenticates a participant when they join a stage.

  • Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

For server-side composition:

  • Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process.

  • Composition — Controls the look of the outputs, including how participants are positioned in the video.

For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

" } diff --git a/botocore/data/ivs-realtime/2020-07-14/waiters-2.json b/botocore/data/ivs-realtime/2020-07-14/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/ivs-realtime/2020-07-14/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/ivs/2020-07-14/service-2.json b/botocore/data/ivs/2020-07-14/service-2.json index 635dd0c199..e97e89e4cf 100644 --- a/botocore/data/ivs/2020-07-14/service-2.json +++ b/botocore/data/ivs/2020-07-14/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2020-07-14", + "auth":["aws.auth#sigv4"], "endpointPrefix":"ivs", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Amazon IVS", "serviceFullName":"Amazon Interactive Video Service", "serviceId":"ivs", @@ -612,21 +613,21 @@ "AudioConfiguration":{ "type":"structure", "members":{ - "channels":{ - "shape":"Integer", - "documentation":"

Number of audio channels.

" - }, "codec":{ "shape":"String", "documentation":"

Codec used for the audio encoding.

" }, + "targetBitrate":{ + "shape":"Integer", + "documentation":"

The expected ingest bitrate (bits per second). This is configured in the encoder.

" + }, "sampleRate":{ "shape":"Integer", "documentation":"

Number of audio samples recorded per second.

" }, - "targetBitrate":{ + "channels":{ "shape":"Integer", - "documentation":"

The expected ingest bitrate (bits per second). This is configured in the encoder.

" + "documentation":"

Number of audio channels.

" } }, "documentation":"

Object specifying a stream’s audio configuration, as set up by the broadcaster (usually in an encoder). This is part of the IngestConfiguration object and used for monitoring stream health.

" @@ -689,13 +690,13 @@ "BatchGetStreamKeyResponse":{ "type":"structure", "members":{ - "errors":{ - "shape":"BatchErrors", - "documentation":"

" - }, "streamKeys":{ "shape":"StreamKeys", "documentation":"

" + }, + "errors":{ + "shape":"BatchErrors", + "documentation":"

" } } }, @@ -710,6 +711,10 @@ "shape":"ChannelArn", "documentation":"

Channel ARN.

" }, + "viewerId":{ + "shape":"ViewerId", + "documentation":"

The ID of the viewer session to revoke.

" + }, "code":{ "shape":"errorCode", "documentation":"

Error code.

" @@ -717,10 +722,6 @@ "message":{ "shape":"errorMessage", "documentation":"

Error message, determined by the application.

" - }, - "viewerId":{ - "shape":"ViewerId", - "documentation":"

The ID of the viewer session to revoke.

" } }, "documentation":"

Error for a request in the batch for BatchStartViewerSessionRevocation. Each error is related to a specific channel-ARN and viewer-ID pair.

" @@ -784,53 +785,53 @@ "shape":"ChannelArn", "documentation":"

Channel ARN.

" }, - "authorized":{ - "shape":"IsAuthorized", - "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" - }, - "ingestEndpoint":{ - "shape":"IngestEndpoint", - "documentation":"

Channel ingest endpoint, part of the definition of an ingest server, used when you set up streaming software.

" - }, - "insecureIngest":{ - "shape":"InsecureIngest", - "documentation":"

Whether the channel allows insecure RTMP ingest. Default: false.

" + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" }, "latencyMode":{ "shape":"ChannelLatencyMode", "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, - "name":{ - "shape":"ChannelName", - "documentation":"

Channel name.

" + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" }, - "playbackRestrictionPolicyArn":{ - "shape":"ChannelPlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + "recordingConfigurationArn":{ + "shape":"ChannelRecordingConfigurationArn", + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. Default: \"\" (empty string, recording is disabled).

" + }, + "ingestEndpoint":{ + "shape":"IngestEndpoint", + "documentation":"

Channel ingest endpoint, part of the definition of an ingest server, used when you set up streaming software.

" }, "playbackUrl":{ "shape":"PlaybackURL", "documentation":"

Channel playback URL.

" }, + "authorized":{ + "shape":"IsAuthorized", + "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" + }, + "insecureIngest":{ + "shape":"InsecureIngest", + "documentation":"

Whether the channel allows insecure RTMP ingest. Default: false.

" + }, "preset":{ "shape":"TranscodePreset", "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" }, - "recordingConfigurationArn":{ - "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. Default: \"\" (empty string, recording is disabled).

" - }, "srt":{ "shape":"Srt", "documentation":"

Specifies the endpoint and optional passphrase for streaming with the SRT protocol.

" }, - "tags":{ - "shape":"Tags", - "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" - }, - "type":{ - "shape":"ChannelType", - "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" } }, "documentation":"

Object specifying a channel.

" @@ -839,7 +840,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+" }, "ChannelArnList":{ "type":"list", @@ -862,7 +863,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "ChannelNotBroadcasting":{ "type":"structure", @@ -898,29 +899,17 @@ "shape":"ChannelArn", "documentation":"

Channel ARN.

" }, - "authorized":{ - "shape":"IsAuthorized", - "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" - }, - "insecureIngest":{ - "shape":"InsecureIngest", - "documentation":"

Whether the channel allows insecure RTMP ingest. Default: false.

" - }, - "latencyMode":{ - "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" - }, "name":{ "shape":"ChannelName", "documentation":"

Channel name.

" }, - "playbackRestrictionPolicyArn":{ - "shape":"ChannelPlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, - "preset":{ - "shape":"TranscodePreset", - "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" + "authorized":{ + "shape":"IsAuthorized", + "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", @@ -930,9 +919,21 @@ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" }, + "insecureIngest":{ + "shape":"InsecureIngest", + "documentation":"

Whether the channel allows insecure RTMP ingest. Default: false.

" + }, "type":{ "shape":"ChannelType", "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" + }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" + }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" } }, "documentation":"

Summary information about a channel.

" @@ -968,29 +969,21 @@ "CreateChannelRequest":{ "type":"structure", "members":{ - "authorized":{ - "shape":"Boolean", - "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" - }, - "insecureIngest":{ - "shape":"Boolean", - "documentation":"

Whether the channel allows insecure RTMP and SRT ingest. Default: false.

" + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" }, "latencyMode":{ "shape":"ChannelLatencyMode", "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, - "name":{ - "shape":"ChannelName", - "documentation":"

Channel name.

" - }, - "playbackRestrictionPolicyArn":{ - "shape":"ChannelPlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" }, - "preset":{ - "shape":"TranscodePreset", - "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" + "authorized":{ + "shape":"Boolean", + "documentation":"

Whether the channel is private (enabled for playback authorization). Default: false.

" }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", @@ -1000,9 +993,17 @@ "shape":"Tags", "documentation":"

Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" }, - "type":{ - "shape":"ChannelType", - "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" + "insecureIngest":{ + "shape":"Boolean", + "documentation":"

Whether the channel allows insecure RTMP and SRT ingest. Default: false.

" + }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" + }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" } } }, @@ -1057,21 +1058,13 @@ "type":"structure", "required":["destinationConfiguration"], "members":{ - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where recorded video will be stored.

" - }, "name":{ "shape":"RecordingConfigurationName", "documentation":"

Recording-configuration name. The value does not need to be unique.

" }, - "recordingReconnectWindowSeconds":{ - "shape":"RecordingReconnectWindowSeconds", - "documentation":"

If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0.

" - }, - "renditionConfiguration":{ - "shape":"RenditionConfiguration", - "documentation":"

Object that describes which renditions should be recorded for a stream.

" + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where recorded video will be stored.

" }, "tags":{ "shape":"Tags", @@ -1080,6 +1073,14 @@ "thumbnailConfiguration":{ "shape":"ThumbnailConfiguration", "documentation":"

A complex type that allows you to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session.

" + }, + "recordingReconnectWindowSeconds":{ + "shape":"RecordingReconnectWindowSeconds", + "documentation":"

If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0.

" + }, + "renditionConfiguration":{ + "shape":"RenditionConfiguration", + "documentation":"

Object that describes which renditions should be recorded for a stream.

" } } }, @@ -1321,14 +1322,14 @@ "type":"structure", "required":["publicKeyMaterial"], "members":{ - "name":{ - "shape":"PlaybackKeyPairName", - "documentation":"

Playback-key-pair name. The value does not need to be unique.

" - }, "publicKeyMaterial":{ "shape":"PlaybackPublicKeyMaterial", "documentation":"

The public portion of a customer-generated key pair.

" }, + "name":{ + "shape":"PlaybackKeyPairName", + "documentation":"

Playback-key-pair name. The value does not need to be unique.

" + }, "tags":{ "shape":"Tags", "documentation":"

Any tags provided with the request are added to the playback key pair tags. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" @@ -1347,13 +1348,13 @@ "IngestConfiguration":{ "type":"structure", "members":{ - "audio":{ - "shape":"AudioConfiguration", - "documentation":"

Encoder settings for audio.

" - }, "video":{ "shape":"VideoConfiguration", "documentation":"

Encoder settings for video.

" + }, + "audio":{ + "shape":"AudioConfiguration", + "documentation":"

Encoder settings for audio.

" } }, "documentation":"

Object specifying the ingest configuration set up by the broadcaster, usually in an encoder.

" @@ -1382,21 +1383,21 @@ "shape":"ChannelName", "documentation":"

Filters the channel list to match the specified name.

" }, - "filterByPlaybackRestrictionPolicyArn":{ - "shape":"ChannelPlaybackRestrictionPolicyArn", - "documentation":"

Filters the channel list to match the specified policy.

" - }, "filterByRecordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

Filters the channel list to match the specified recording-configuration ARN.

" }, - "maxResults":{ - "shape":"MaxChannelResults", - "documentation":"

Maximum number of channels to return. Default: 100.

" + "filterByPlaybackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Filters the channel list to match the specified policy.

" }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first channel to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxChannelResults", + "documentation":"

Maximum number of channels to return. Default: 100.

" } } }, @@ -1417,13 +1418,13 @@ "ListPlaybackKeyPairsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxPlaybackKeyPairResults", - "documentation":"

Maximum number of key pairs to return. Default: your service quota or 100, whichever is smaller.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first key pair to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxPlaybackKeyPairResults", + "documentation":"

Maximum number of key pairs to return. Default: your service quota or 100, whichever is smaller.

" } } }, @@ -1444,13 +1445,13 @@ "ListPlaybackRestrictionPoliciesRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxPlaybackRestrictionPolicyResults", - "documentation":"

Maximum number of policies to return. Default: 1.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first policy to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxPlaybackRestrictionPolicyResults", + "documentation":"

Maximum number of policies to return. Default: 1.

" } } }, @@ -1458,26 +1459,26 @@ "type":"structure", "required":["playbackRestrictionPolicies"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more channels than maxResults, use nextToken in the request to get the next set.

" - }, "playbackRestrictionPolicies":{ "shape":"PlaybackRestrictionPolicyList", "documentation":"

List of the matching policies.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more channels than maxResults, use nextToken in the request to get the next set.

" } } }, "ListRecordingConfigurationsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxRecordingConfigurationResults", - "documentation":"

Maximum number of recording configurations to return. Default: your service quota or 100, whichever is smaller.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first recording configuration to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxRecordingConfigurationResults", + "documentation":"

Maximum number of recording configurations to return. Default: your service quota or 100, whichever is smaller.

" } } }, @@ -1485,13 +1486,13 @@ "type":"structure", "required":["recordingConfigurations"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more recording configurations than maxResults, use nextToken in the request to get the next set.

" - }, "recordingConfigurations":{ "shape":"RecordingConfigurationList", "documentation":"

List of the matching recording configurations.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more recording configurations than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -1503,13 +1504,13 @@ "shape":"ChannelArn", "documentation":"

Channel ARN used to filter the list.

" }, - "maxResults":{ - "shape":"MaxStreamKeyResults", - "documentation":"

Maximum number of streamKeys to return. Default: 1.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first stream key to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStreamKeyResults", + "documentation":"

Maximum number of streamKeys to return. Default: 1.

" } } }, @@ -1517,13 +1518,13 @@ "type":"structure", "required":["streamKeys"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more stream keys than maxResults, use nextToken in the request to get the next set.

" - }, "streamKeys":{ "shape":"StreamKeyList", "documentation":"

List of stream keys.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more stream keys than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -1535,13 +1536,13 @@ "shape":"ChannelArn", "documentation":"

Channel ARN used to filter the list.

" }, - "maxResults":{ - "shape":"MaxStreamResults", - "documentation":"

Maximum number of streams to return. Default: 100.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first stream to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStreamResults", + "documentation":"

Maximum number of streams to return. Default: 100.

" } } }, @@ -1549,13 +1550,13 @@ "type":"structure", "required":["streamSessions"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more streams than maxResults, use nextToken in the request to get the next set.

" - }, "streamSessions":{ "shape":"StreamSessionList", "documentation":"

List of stream sessions.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more streams than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -1566,13 +1567,13 @@ "shape":"StreamFilters", "documentation":"

Filters the stream list to match the specified criterion.

" }, - "maxResults":{ - "shape":"MaxStreamResults", - "documentation":"

Maximum number of streams to return. Default: 100.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first stream to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStreamResults", + "documentation":"

Maximum number of streams to return. Default: 100.

" } } }, @@ -1580,13 +1581,13 @@ "type":"structure", "required":["streams"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more streams than maxResults, use nextToken in the request to get the next set.

" - }, "streams":{ "shape":"StreamList", "documentation":"

List of streams.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more streams than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -1652,7 +1653,7 @@ "type":"string", "max":1024, "min":0, - "pattern":"^[a-zA-Z0-9+/=_-]*$" + "pattern":"[a-zA-Z0-9+/=_-]*" }, "PendingVerification":{ "type":"structure", @@ -1676,14 +1677,14 @@ "shape":"PlaybackKeyPairArn", "documentation":"

Key-pair ARN.

" }, - "fingerprint":{ - "shape":"PlaybackKeyPairFingerprint", - "documentation":"

Key-pair identifier.

" - }, "name":{ "shape":"PlaybackKeyPairName", "documentation":"

Playback-key-pair name. The value does not need to be unique.

" }, + "fingerprint":{ + "shape":"PlaybackKeyPairFingerprint", + "documentation":"

Key-pair identifier.

" + }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" @@ -1695,7 +1696,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-key/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-key/[a-zA-Z0-9-]+" }, "PlaybackKeyPairFingerprint":{"type":"string"}, "PlaybackKeyPairList":{ @@ -1706,7 +1707,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "PlaybackKeyPairSummary":{ "type":"structure", @@ -1730,11 +1731,15 @@ "PlaybackRestrictionPolicy":{ "type":"structure", "required":[ + "arn", "allowedCountries", - "allowedOrigins", - "arn" + "allowedOrigins" ], "members":{ + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN

" + }, "allowedCountries":{ "shape":"PlaybackRestrictionPolicyAllowedCountryList", "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" @@ -1743,10 +1748,6 @@ "shape":"PlaybackRestrictionPolicyAllowedOriginList", "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" }, - "arn":{ - "shape":"PlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN

" - }, "enableStrictOriginEnforcement":{ "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" @@ -1784,7 +1785,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+" }, "PlaybackRestrictionPolicyEnableStrictOriginEnforcement":{ "type":"boolean", @@ -1798,16 +1799,20 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "PlaybackRestrictionPolicySummary":{ "type":"structure", "required":[ + "arn", "allowedCountries", - "allowedOrigins", - "arn" + "allowedOrigins" ], "members":{ + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN

" + }, "allowedCountries":{ "shape":"PlaybackRestrictionPolicyAllowedCountryList", "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" @@ -1816,10 +1821,6 @@ "shape":"PlaybackRestrictionPolicyAllowedOriginList", "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" }, - "arn":{ - "shape":"PlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN

" - }, "enableStrictOriginEnforcement":{ "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" @@ -1865,21 +1866,13 @@ "shape":"RecordingConfigurationArn", "documentation":"

Recording-configuration ARN.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains information about where recorded video will be stored.

" - }, "name":{ "shape":"RecordingConfigurationName", "documentation":"

Recording-configuration name. The value does not need to be unique.

" }, - "recordingReconnectWindowSeconds":{ - "shape":"RecordingReconnectWindowSeconds", - "documentation":"

If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0.

" - }, - "renditionConfiguration":{ - "shape":"RenditionConfiguration", - "documentation":"

Object that describes which renditions should be recorded for a stream.

" + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains information about where recorded video will be stored.

" }, "state":{ "shape":"RecordingConfigurationState", @@ -1892,6 +1885,14 @@ "thumbnailConfiguration":{ "shape":"ThumbnailConfiguration", "documentation":"

A complex type that allows you to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session.

" + }, + "recordingReconnectWindowSeconds":{ + "shape":"RecordingReconnectWindowSeconds", + "documentation":"

If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0.

" + }, + "renditionConfiguration":{ + "shape":"RenditionConfiguration", + "documentation":"

Object that describes which renditions should be recorded for a stream.

" } }, "documentation":"

An object representing a configuration to record a channel stream.

" @@ -1900,7 +1901,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:recording-configuration/[a-zA-Z0-9-]+" }, "RecordingConfigurationList":{ "type":"list", @@ -1910,7 +1911,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "RecordingConfigurationState":{ "type":"string", @@ -1932,14 +1933,14 @@ "shape":"RecordingConfigurationArn", "documentation":"

Recording-configuration ARN.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains information about where recorded video will be stored.

" - }, "name":{ "shape":"RecordingConfigurationName", "documentation":"

Recording-configuration name. The value does not need to be unique.

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains information about where recorded video will be stored.

" + }, "state":{ "shape":"RecordingConfigurationState", "documentation":"

Indicates the current state of the recording configuration. When the state is ACTIVE, the configuration is ready for recording a channel stream.

" @@ -1980,9 +1981,9 @@ "RenditionConfigurationRendition":{ "type":"string", "enum":[ - "FULL_HD", - "HD", "SD", + "HD", + "FULL_HD", "LOWEST_RESOLUTION" ] }, @@ -2002,7 +2003,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+" }, "ResourceNotFoundException":{ "type":"structure", @@ -2023,7 +2024,7 @@ "type":"string", "max":63, "min":3, - "pattern":"^[a-z0-9-.]+$" + "pattern":"[a-z0-9-.]+" }, "S3DestinationConfiguration":{ "type":"structure", @@ -2118,9 +2119,9 @@ "shape":"ChannelArn", "documentation":"

Channel ARN for the stream.

" }, - "health":{ - "shape":"StreamHealth", - "documentation":"

The stream’s health.

" + "streamId":{ + "shape":"StreamId", + "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" }, "playbackUrl":{ "shape":"PlaybackURL", @@ -2134,9 +2135,9 @@ "shape":"StreamState", "documentation":"

The stream’s state. Do not rely on the OFFLINE state, as the API may not return it; instead, a \"NotBroadcasting\" error will indicate that the stream is not live.

" }, - "streamId":{ - "shape":"StreamId", - "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" + "health":{ + "shape":"StreamHealth", + "documentation":"

The stream’s health.

" }, "viewerCount":{ "shape":"StreamViewerCount", @@ -2148,10 +2149,6 @@ "StreamEvent":{ "type":"structure", "members":{ - "eventTime":{ - "shape":"Time", - "documentation":"

Time when the event occurred. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "name":{ "shape":"String", "documentation":"

Name that identifies the stream event within a type.

" @@ -2159,6 +2156,10 @@ "type":{ "shape":"String", "documentation":"

Logical group for certain events.

" + }, + "eventTime":{ + "shape":"Time", + "documentation":"

Time when the event occurred. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Object specifying a stream’s events. For a list of events, see Using Amazon EventBridge with Amazon IVS.

" @@ -2191,7 +2192,7 @@ "type":"string", "max":26, "min":26, - "pattern":"^st-[a-zA-Z0-9]+$" + "pattern":"st-[a-zA-Z0-9]+" }, "StreamKey":{ "type":"structure", @@ -2200,6 +2201,10 @@ "shape":"StreamKeyArn", "documentation":"

Stream-key ARN.

" }, + "value":{ + "shape":"StreamKeyValue", + "documentation":"

Stream-key value.

" + }, "channelArn":{ "shape":"ChannelArn", "documentation":"

Channel ARN for the stream.

" @@ -2207,10 +2212,6 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" - }, - "value":{ - "shape":"StreamKeyValue", - "documentation":"

Stream-key value.

" } }, "documentation":"

Object specifying a stream key.

" @@ -2219,7 +2220,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:stream-key/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivs:[a-z0-9-]+:[0-9]+:stream-key/[a-zA-Z0-9-]+" }, "StreamKeyArnList":{ "type":"list", @@ -2269,14 +2270,22 @@ "StreamSession":{ "type":"structure", "members":{ - "channel":{ - "shape":"Channel", - "documentation":"

The properties of the channel at the time of going live.

" + "streamId":{ + "shape":"StreamId", + "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

Time when the channel went live. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "endTime":{ "shape":"Time", "documentation":"

Time when the channel went offline. This is an ISO 8601 timestamp; note that this is returned as a string. For live streams, this is NULL.

" }, + "channel":{ + "shape":"Channel", + "documentation":"

The properties of the channel at the time of going live.

" + }, "ingestConfiguration":{ "shape":"IngestConfiguration", "documentation":"

The properties of the incoming RTMP stream for the stream.

" @@ -2285,14 +2294,6 @@ "shape":"RecordingConfiguration", "documentation":"

The properties of recording the live stream.

" }, - "startTime":{ - "shape":"Time", - "documentation":"

Time when the channel went live. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, - "streamId":{ - "shape":"StreamId", - "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" - }, "truncatedEvents":{ "shape":"StreamEvents", "documentation":"

List of Amazon IVS events that the stream encountered. The list is sorted by most recent events and contains up to 500 events. For Amazon IVS events, see Using Amazon EventBridge with Amazon IVS.

" @@ -2307,6 +2308,14 @@ "StreamSessionSummary":{ "type":"structure", "members":{ + "streamId":{ + "shape":"StreamId", + "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

Time when the channel went live. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, "endTime":{ "shape":"Time", "documentation":"

Time when the channel went offline. This is an ISO 8601 timestamp; note that this is returned as a string. For live streams, this is NULL.

" @@ -2314,14 +2323,6 @@ "hasErrorEvent":{ "shape":"Boolean", "documentation":"

If true, this stream encountered a quota breach or failure.

" - }, - "startTime":{ - "shape":"Time", - "documentation":"

Time when the channel went live. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, - "streamId":{ - "shape":"StreamId", - "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" } }, "documentation":"

Summary information about a stream session.

" @@ -2344,25 +2345,25 @@ "shape":"ChannelArn", "documentation":"

Channel ARN for the stream.

" }, - "health":{ - "shape":"StreamHealth", - "documentation":"

The stream’s health.

" - }, - "startTime":{ - "shape":"StreamStartTime", - "documentation":"

Time of the stream’s start. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "streamId":{ + "shape":"StreamId", + "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" }, "state":{ "shape":"StreamState", "documentation":"

The stream’s state. Do not rely on the OFFLINE state, as the API may not return it; instead, a \"NotBroadcasting\" error will indicate that the stream is not live.

" }, - "streamId":{ - "shape":"StreamId", - "documentation":"

Unique identifier for a live or previously live stream in the specified channel.

" + "health":{ + "shape":"StreamHealth", + "documentation":"

The stream’s health.

" }, "viewerCount":{ "shape":"StreamViewerCount", "documentation":"

A count of concurrent views of the stream. Typically, a new view appears in viewerCount within 15 seconds of when video playback starts and a view is removed from viewerCount within 1 minute of when video playback ends. A value of -1 indicates that the request timed out; in this case, retry.

" + }, + "startTime":{ + "shape":"StreamStartTime", + "documentation":"

Time of the stream’s start. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Summary information about a stream.

" @@ -2457,6 +2458,10 @@ "shape":"RecordingMode", "documentation":"

Thumbnail recording mode. Default: INTERVAL.

" }, + "targetIntervalSeconds":{ + "shape":"TargetIntervalSeconds", + "documentation":"

The targeted thumbnail-generation interval in seconds. This is configurable (and required) only if recordingMode is INTERVAL. Default: 60.

Important: For the BASIC channel type, setting a value for targetIntervalSeconds does not guarantee that thumbnails are generated at the specified interval. For thumbnails to be generated at the targetIntervalSeconds interval, the IDR/Keyframe value for the input video must be less than the targetIntervalSeconds value. See Amazon IVS Streaming Configuration for information on setting IDR/Keyframe to the recommended value in video-encoder settings.

" + }, "resolution":{ "shape":"ThumbnailConfigurationResolution", "documentation":"

Indicates the desired resolution of recorded thumbnails. Thumbnails are recorded at the selected resolution if the corresponding rendition is available during the stream; otherwise, they are recorded at source resolution. For more information about resolution values and their corresponding height and width dimensions, see Auto-Record to Amazon S3. Default: Null (source resolution is returned).

" @@ -2464,10 +2469,6 @@ "storage":{ "shape":"ThumbnailConfigurationStorageList", "documentation":"

Indicates the format in which thumbnails are recorded. SEQUENTIAL records all generated thumbnails in a serial manner, to the media/thumbnails directory. LATEST saves the latest thumbnail in media/latest_thumbnail/thumb.jpg and overwrites it at the interval specified by targetIntervalSeconds. You can enable both SEQUENTIAL and LATEST. Default: SEQUENTIAL.

" - }, - "targetIntervalSeconds":{ - "shape":"TargetIntervalSeconds", - "documentation":"

The targeted thumbnail-generation interval in seconds. This is configurable (and required) only if recordingMode is INTERVAL. Default: 60.

Important: For the BASIC channel type, setting a value for targetIntervalSeconds does not guarantee that thumbnails are generated at the specified interval. For thumbnails to be generated at the targetIntervalSeconds interval, the IDR/Keyframe value for the input video must be less than the targetIntervalSeconds value. See Amazon IVS Streaming Configuration for information on setting IDR/Keyframe to the recommended value in video-encoder settings.

" } }, "documentation":"

An object representing a configuration of thumbnails for recorded video.

" @@ -2475,9 +2476,9 @@ "ThumbnailConfigurationResolution":{ "type":"string", "enum":[ - "FULL_HD", - "HD", "SD", + "HD", + "FULL_HD", "LOWEST_RESOLUTION" ] }, @@ -2537,37 +2538,37 @@ "shape":"ChannelArn", "documentation":"

ARN of the channel to be updated.

" }, + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers.

" + }, + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" + }, "authorized":{ "shape":"Boolean", "documentation":"

Whether the channel is private (enabled for playback authorization).

" }, + "recordingConfigurationArn":{ + "shape":"ChannelRecordingConfigurationArn", + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. If this is set to an empty string, recording is disabled.

" + }, "insecureIngest":{ "shape":"Boolean", "documentation":"

Whether the channel allows insecure RTMP and SRT ingest. Default: false.

" }, - "latencyMode":{ - "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers.

" - }, - "name":{ - "shape":"ChannelName", - "documentation":"

Channel name.

" - }, - "playbackRestrictionPolicyArn":{ - "shape":"ChannelPlaybackRestrictionPolicyArn", - "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. If this is set to an empty string, playback restriction policy is disabled.

" - }, "preset":{ "shape":"TranscodePreset", "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" }, - "recordingConfigurationArn":{ - "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. If this is set to an empty string, recording is disabled.

" - }, - "type":{ - "shape":"ChannelType", - "documentation":"

Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

" + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. If this is set to an empty string, playback restriction policy is disabled.

" } } }, @@ -2584,6 +2585,10 @@ "type":"structure", "required":["arn"], "members":{ + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

ARN of the playback-restriction-policy to be updated.

" + }, "allowedCountries":{ "shape":"PlaybackRestrictionPolicyAllowedCountryList", "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" @@ -2592,10 +2597,6 @@ "shape":"PlaybackRestrictionPolicyAllowedOriginList", "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" }, - "arn":{ - "shape":"PlaybackRestrictionPolicyArn", - "documentation":"

ARN of the playback-restriction-policy to be updated.

" - }, "enableStrictOriginEnforcement":{ "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" @@ -2633,14 +2634,14 @@ "VideoConfiguration":{ "type":"structure", "members":{ - "avcLevel":{ - "shape":"String", - "documentation":"

Indicates the degree of required decoder performance for a profile. Normally this is set automatically by the encoder. For details, see the H.264 specification.

" - }, "avcProfile":{ "shape":"String", "documentation":"

Indicates to the decoder the requirements for decoding the stream. For definitions of the valid values, see the H.264 specification.

" }, + "avcLevel":{ + "shape":"String", + "documentation":"

Indicates the degree of required decoder performance for a profile. Normally this is set automatically by the encoder. For details, see the H.264 specification.

" + }, "codec":{ "shape":"String", "documentation":"

Codec used for the video encoding.

" @@ -2680,5 +2681,5 @@ "errorCode":{"type":"string"}, "errorMessage":{"type":"string"} }, - "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference.

All API request parameters and URLs are case sensitive.

For a summary of notable documentation changes in each release, see Document History.

Allowed Header Values

  • Accept: application/json

  • Accept-Encoding: gzip, deflate

  • Content-Type: application/json

Key Concepts

  • Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.

  • Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel.

  • Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.

  • Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.

  • Playback restriction policy — Restricts playback by countries and/or origin sites.

For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.

Tagging

A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations.

At most 50 tags can be applied to a resource.

Authentication versus Authorization

Note the differences between these concepts:

  • Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests.

  • Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for \"playback authorization.\")

Authentication

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests.

You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

Channel Endpoints

  • CreateChannel — Creates a new channel and an associated stream key to start streaming.

  • GetChannel — Gets the channel configuration for the specified channel ARN.

  • BatchGetChannel — Performs GetChannel on multiple ARNs simultaneously.

  • ListChannels — Gets summary information about all channels in your account, in the Amazon Web Services region where the API request is processed. This list can be filtered to match a specified name or recording-configuration ARN. Filters are mutually exclusive and cannot be used together. If you try to use both filters, you will get an error (409 Conflict Exception).

  • UpdateChannel — Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

  • DeleteChannel — Deletes the specified channel.

Playback Restriction Policy Endpoints

Private Channel Endpoints

For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

  • ImportPlaybackKeyPair — Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to private channels (channels enabled for playback authorization).

  • GetPlaybackKeyPair — Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to private channels.

  • ListPlaybackKeyPairs — Gets summary information about playback key pairs.

  • DeletePlaybackKeyPair — Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair’s privateKey.

  • StartViewerSessionRevocation — Starts the process of revoking the viewer session associated with a specified channel ARN and viewer ID. Optionally, you can provide a version to revoke viewer sessions less than and including that version.

  • BatchStartViewerSessionRevocation — Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs simultaneously.

Recording Configuration Endpoints

Stream Endpoints

  • GetStream — Gets information about the active (live) stream on a specified channel.

  • GetStreamSession — Gets metadata on a specified stream.

  • ListStreams — Gets summary information about live streams in your account, in the Amazon Web Services region where the API request is processed.

  • ListStreamSessions — Gets a summary of current and previous streams for a specified channel in your account, in the AWS region where the API request is processed.

  • StopStream — Disconnects the incoming RTMPS stream for the specified channel. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel.

  • PutMetadata — Inserts metadata into the active stream of the specified channel. At most 5 requests per second per channel are allowed, each with a maximum 1 KB payload. (If 5 TPS is not sufficient for your needs, we recommend batching your data into a single PutMetadata call.) At most 155 requests per second per account are allowed.

Stream Key Endpoints

  • CreateStreamKey — Creates a stream key, used to initiate a stream, for the specified channel ARN.

  • GetStreamKey — Gets stream key information for the specified ARN.

  • BatchGetStreamKey — Performs GetStreamKey on multiple ARNs simultaneously.

  • ListStreamKeys — Gets summary information about stream keys for the specified channel.

  • DeleteStreamKey — Deletes the stream key for the specified ARN, so it can no longer be used to stream.

Amazon Web Services Tags Endpoints

  • TagResource — Adds or updates tags for the Amazon Web Services resource with the specified ARN.

  • UntagResource — Removes tags from the resource with the specified ARN.

  • ListTagsForResource — Gets information about Amazon Web Services tags for the specified ARN.

" + "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference.

All API request parameters and URLs are case sensitive.

For a summary of notable documentation changes in each release, see Document History.

Allowed Header Values

  • Accept: application/json

  • Accept-Encoding: gzip, deflate

  • Content-Type: application/json

Key Concepts

  • Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream.

  • Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel.

  • Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token.

  • Recording configuration — Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration.

  • Playback restriction policy — Restricts playback by countries and/or origin sites.

For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming.

Tagging

A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations.

At most 50 tags can be applied to a resource.

Authentication versus Authorization

Note the differences between these concepts:

  • Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests.

  • Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for \"playback authorization.\")

Authentication

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests.

You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

" } diff --git a/botocore/data/ivschat/2020-07-14/service-2.json b/botocore/data/ivschat/2020-07-14/service-2.json index b7ec1b2ec7..0232cb940f 100644 --- a/botocore/data/ivschat/2020-07-14/service-2.json +++ b/botocore/data/ivschat/2020-07-14/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2020-07-14", + "auth":["aws.auth#sigv4"], "endpointPrefix":"ivschat", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"ivschat", "serviceFullName":"Amazon Interactive Video Service Chat", "serviceId":"ivschat", @@ -321,7 +322,7 @@ "type":"string", "max":63, "min":3, - "pattern":"^[a-z0-9-.]+$" + "pattern":"[a-z0-9-.]+" }, "ChatToken":{ "type":"string", @@ -388,35 +389,31 @@ "userId" ], "members":{ - "attributes":{ - "shape":"ChatTokenAttributes", - "documentation":"

Application-provided attributes to encode into the token and attach to a chat session. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total.

" + "roomIdentifier":{ + "shape":"RoomIdentifier", + "documentation":"

Identifier of the room that the client is trying to access. Currently this must be an ARN.

" + }, + "userId":{ + "shape":"UserID", + "documentation":"

Application-provided ID that uniquely identifies the user associated with this token. This can be any UTF-8 encoded text.

" }, "capabilities":{ "shape":"ChatTokenCapabilities", "documentation":"

Set of capabilities that the user is allowed to perform in the room. Default: None (the capability to view messages is implicitly included in all requests).

" }, - "roomIdentifier":{ - "shape":"RoomIdentifier", - "documentation":"

Identifier of the room that the client is trying to access. Currently this must be an ARN.

" - }, "sessionDurationInMinutes":{ "shape":"SessionDurationInMinutes", "documentation":"

Session duration (in minutes), after which the session expires. Default: 60 (1 hour).

" }, - "userId":{ - "shape":"UserID", - "documentation":"

Application-provided ID that uniquely identifies the user associated with this token. This can be any UTF-8 encoded text.

" + "attributes":{ + "shape":"ChatTokenAttributes", + "documentation":"

Application-provided attributes to encode into the token and attach to a chat session. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total.

" } } }, "CreateChatTokenResponse":{ "type":"structure", "members":{ - "sessionExpirationTime":{ - "shape":"Time", - "documentation":"

Time after which an end user's session is no longer valid. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "token":{ "shape":"ChatToken", "documentation":"

The issued client token, encrypted.

" @@ -424,6 +421,10 @@ "tokenExpirationTime":{ "shape":"Time", "documentation":"

Time after which the token is no longer valid and cannot be used to connect to a room. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "sessionExpirationTime":{ + "shape":"Time", + "documentation":"

Time after which an end user's session is no longer valid. This is an ISO 8601 timestamp; note that this is returned as a string.

" } } }, @@ -431,14 +432,14 @@ "type":"structure", "required":["destinationConfiguration"], "members":{ - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There can be only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" - }, "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name. The value does not need to be unique.

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There can be only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" + }, "tags":{ "shape":"Tags", "documentation":"

Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no constraints on tags beyond what is documented there.

" @@ -452,22 +453,26 @@ "shape":"LoggingConfigurationArn", "documentation":"

Logging-configuration ARN, assigned by the system.

" }, + "id":{ + "shape":"LoggingConfigurationID", + "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + }, "createTime":{ "shape":"Time", "documentation":"

Time when the logging configuration was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged, from the request. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" - }, - "id":{ - "shape":"LoggingConfigurationID", - "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name, from the request (if specified).

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged, from the request. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" + }, "state":{ "shape":"CreateLoggingConfigurationState", "documentation":"

The state of the logging configuration. When the state is ACTIVE, the configuration is ready to log chat content.

" @@ -475,10 +480,6 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource, from the request (if specified). Array of maps, each of the form string:string (key:value).

" - }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" } } }, @@ -489,29 +490,29 @@ "CreateRoomRequest":{ "type":"structure", "members":{ - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

Array of logging-configuration identifiers attached to the room.

" - }, - "maximumMessageLength":{ - "shape":"RoomMaxMessageLength", - "documentation":"

Maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name. The value does not need to be unique.

" }, "maximumMessageRatePerSecond":{ "shape":"RoomMaxMessageRatePerSecond", "documentation":"

Maximum number of messages per second that can be sent to the room (by all clients). Default: 10.

" }, + "maximumMessageLength":{ + "shape":"RoomMaxMessageLength", + "documentation":"

Maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name. The value does not need to be unique.

" - }, "tags":{ "shape":"Tags", "documentation":"

Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no constraints beyond what is documented there.

" + }, + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

Array of logging-configuration identifiers attached to the room.

" } } }, @@ -522,41 +523,41 @@ "shape":"RoomArn", "documentation":"

Room ARN, assigned by the system.

" }, - "createTime":{ - "shape":"Time", - "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"RoomID", "documentation":"

Room ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" }, - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

Array of logging configurations attached to the room, from the request (if specified).

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name, from the request (if specified).

" }, - "maximumMessageLength":{ - "shape":"RoomMaxMessageLength", - "documentation":"

Maximum number of characters in a single message, from the request (if specified).

" + "createTime":{ + "shape":"Time", + "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "maximumMessageRatePerSecond":{ "shape":"RoomMaxMessageRatePerSecond", "documentation":"

Maximum number of messages per second that can be sent to the room (by all clients), from the request (if specified).

" }, + "maximumMessageLength":{ + "shape":"RoomMaxMessageLength", + "documentation":"

Maximum number of characters in a single message, from the request (if specified).

" + }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name, from the request (if specified).

" - }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource, from the request (if specified).

" }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

Array of logging configurations attached to the room, from the request (if specified).

" } } }, @@ -573,10 +574,14 @@ "DeleteMessageRequest":{ "type":"structure", "required":[ - "id", - "roomIdentifier" + "roomIdentifier", + "id" ], "members":{ + "roomIdentifier":{ + "shape":"RoomIdentifier", + "documentation":"

Identifier of the room where the message should be deleted. Currently this must be an ARN.

" + }, "id":{ "shape":"MessageID", "documentation":"

ID of the message to be deleted. This is the Id field in the received message (see Message (Subscribe) in the Chat Messaging API).

" @@ -584,10 +589,6 @@ "reason":{ "shape":"Reason", "documentation":"

Reason for deleting the message.

" - }, - "roomIdentifier":{ - "shape":"RoomIdentifier", - "documentation":"

Identifier of the room where the message should be deleted. Currently this must be an ARN.

" } } }, @@ -614,11 +615,15 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9_.-]+$" + "pattern":"[a-zA-Z0-9_.-]+" }, "DestinationConfiguration":{ "type":"structure", "members":{ + "s3":{ + "shape":"S3DestinationConfiguration", + "documentation":"

An Amazon S3 destination configuration where chat activity will be logged.

" + }, "cloudWatchLogs":{ "shape":"CloudWatchLogsDestinationConfiguration", "documentation":"

An Amazon CloudWatch Logs destination configuration where chat activity will be logged.

" @@ -626,10 +631,6 @@ "firehose":{ "shape":"FirehoseDestinationConfiguration", "documentation":"

An Amazon Kinesis Data Firehose destination configuration where chat activity will be logged.

" - }, - "s3":{ - "shape":"S3DestinationConfiguration", - "documentation":"

An Amazon S3 destination configuration where chat activity will be logged.

" } }, "documentation":"

A complex type that describes a location where chat logs will be stored. Each member represents the configuration of one log destination. For logging, you define only one type of destination (for CloudWatch Logs, Kinesis Firehose, or S3).

", @@ -642,10 +643,6 @@ "userId" ], "members":{ - "reason":{ - "shape":"Reason", - "documentation":"

Reason for disconnecting the user.

" - }, "roomIdentifier":{ "shape":"RoomIdentifier", "documentation":"

Identifier of the room from which the user's clients should be disconnected. Currently this must be an ARN.

" @@ -653,6 +650,10 @@ "userId":{ "shape":"UserID", "documentation":"

ID of the user (connection) to disconnect from the room.

" + }, + "reason":{ + "shape":"Reason", + "documentation":"

Reason for disconnecting the user.

" } } }, @@ -708,22 +709,26 @@ "shape":"LoggingConfigurationArn", "documentation":"

Logging-configuration ARN, from the request (if identifier was an ARN).

" }, + "id":{ + "shape":"LoggingConfigurationID", + "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + }, "createTime":{ "shape":"Time", "documentation":"

Time when the logging configuration was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" - }, - "id":{ - "shape":"LoggingConfigurationID", - "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name. This value does not need to be unique.

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" + }, "state":{ "shape":"LoggingConfigurationState", "documentation":"

The state of the logging configuration. When the state is ACTIVE, the configuration is ready to log chat content.

" @@ -731,10 +736,6 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value).

" - }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" } } }, @@ -755,41 +756,41 @@ "shape":"RoomArn", "documentation":"

Room ARN, from the request (if identifier was an ARN).

" }, - "createTime":{ - "shape":"Time", - "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"RoomID", "documentation":"

Room ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" }, - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

Array of logging configurations attached to the room.

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name. The value does not need to be unique.

" }, - "maximumMessageLength":{ - "shape":"RoomMaxMessageLength", - "documentation":"

Maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + "createTime":{ + "shape":"Time", + "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "maximumMessageRatePerSecond":{ "shape":"RoomMaxMessageRatePerSecond", "documentation":"

Maximum number of messages per second that can be sent to the room (by all clients). Default: 10.

" }, + "maximumMessageLength":{ + "shape":"RoomMaxMessageLength", + "documentation":"

Maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name. The value does not need to be unique.

" - }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value).

" }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

Array of logging configurations attached to the room.

" } } }, @@ -797,7 +798,7 @@ "type":"string", "max":12, "min":12, - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "InternalServerException":{ "type":"structure", @@ -814,19 +815,19 @@ "type":"string", "max":170, "min":0, - "pattern":"^$|^arn:aws:lambda:[a-z0-9-]+:[0-9]{12}:function:.+" + "pattern":"$|^arn:aws:lambda:[a-z0-9-]+:[0-9]{12}:function:.+.*" }, "Limit":{"type":"integer"}, "ListLoggingConfigurationsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxLoggingConfigurationResults", - "documentation":"

Maximum number of logging configurations to return. Default: 50.

" - }, "nextToken":{ "shape":"PaginationToken", "documentation":"

The first logging configurations to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxLoggingConfigurationResults", + "documentation":"

Maximum number of logging configurations to return. Default: 50.

" } } }, @@ -847,9 +848,13 @@ "ListRoomsRequest":{ "type":"structure", "members":{ - "loggingConfigurationIdentifier":{ - "shape":"LoggingConfigurationIdentifier", - "documentation":"

Logging-configuration identifier.

" + "name":{ + "shape":"RoomName", + "documentation":"

Filters the list to match the specified room name.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first room to retrieve. This is used for pagination; see the nextToken response field.

" }, "maxResults":{ "shape":"MaxRoomResults", @@ -859,13 +864,9 @@ "shape":"LambdaArn", "documentation":"

Filters the list to match the specified message review handler URI.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Filters the list to match the specified room name.

" - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

The first room to retrieve. This is used for pagination; see the nextToken response field.

" + "loggingConfigurationIdentifier":{ + "shape":"LoggingConfigurationIdentifier", + "documentation":"

Logging-configuration identifier.

" } } }, @@ -873,13 +874,13 @@ "type":"structure", "required":["rooms"], "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

If there are more rooms than maxResults, use nextToken in the request to get the next set.

" - }, "rooms":{ "shape":"RoomList", "documentation":"

List of the matching rooms (summary information only).

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more rooms than maxResults, use nextToken in the request to get the next set.

" } } }, @@ -909,25 +910,25 @@ "type":"string", "max":512, "min":1, - "pattern":"^[\\.\\-_/#A-Za-z0-9]+$" + "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, "LoggingConfigurationArn":{ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivschat:[a-z0-9-]+:[0-9]+:logging-configuration/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivschat:[a-z0-9-]+:[0-9]+:logging-configuration/[a-zA-Z0-9-]+" }, "LoggingConfigurationID":{ "type":"string", "max":12, "min":12, - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "LoggingConfigurationIdentifier":{ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivschat:[a-z0-9-]+:[0-9]+:logging-configuration/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivschat:[a-z0-9-]+:[0-9]+:logging-configuration/[a-zA-Z0-9-]+" }, "LoggingConfigurationIdentifierList":{ "type":"list", @@ -943,7 +944,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "LoggingConfigurationState":{ "type":"string", @@ -964,22 +965,26 @@ "shape":"LoggingConfigurationArn", "documentation":"

Logging-configuration ARN.

" }, + "id":{ + "shape":"LoggingConfigurationID", + "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + }, "createTime":{ "shape":"Time", "documentation":"

Time when the logging configuration was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged.

" - }, - "id":{ - "shape":"LoggingConfigurationID", - "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name. The value does not need to be unique.

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged.

" + }, "state":{ "shape":"LoggingConfigurationState", "documentation":"

The state of the logging configuration. When this is ACTIVE, the configuration is ready for logging chat content.

" @@ -987,10 +992,6 @@ "tags":{ "shape":"Tags", "documentation":"

Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no constraints on tags beyond what is documented there.

" - }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" } }, "documentation":"

Summary information about a logging configuration.

" @@ -1011,18 +1012,18 @@ "type":"string", "max":12, "min":12, - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "MessageReviewHandler":{ "type":"structure", "members":{ - "fallbackResult":{ - "shape":"FallbackResult", - "documentation":"

Specifies the fallback behavior (whether the message is allowed or denied) if the handler does not return a valid response, encounters an error, or times out. (For the timeout period, see Service Quotas.) If allowed, the message is delivered with returned content to all users connected to the room. If denied, the message is not delivered to any user. Default: ALLOW.

" - }, "uri":{ "shape":"LambdaArn", "documentation":"

Identifier of the message review handler. Currently this must be an ARN of a lambda function.

" + }, + "fallbackResult":{ + "shape":"FallbackResult", + "documentation":"

Specifies the fallback behavior (whether the message is allowed or denied) if the handler does not return a valid response, encounters an error, or times out. (For the timeout period, see Service Quotas.) If allowed, the message is delivered with returned content to all users connected to the room. If denied, the message is not delivered to any user. Default: ALLOW.

" } }, "documentation":"

Configuration information for optional message review.

" @@ -1054,11 +1055,11 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivschat:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivschat:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+" }, "ResourceId":{ "type":"string", - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "ResourceNotFoundException":{ "type":"structure", @@ -1093,19 +1094,19 @@ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivschat:[a-z0-9-]+:[0-9]+:room/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivschat:[a-z0-9-]+:[0-9]+:room/[a-zA-Z0-9-]+" }, "RoomID":{ "type":"string", "max":12, "min":12, - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "RoomIdentifier":{ "type":"string", "max":128, "min":1, - "pattern":"^arn:aws:ivschat:[a-z0-9-]+:[0-9]+:room/[a-zA-Z0-9-]+$" + "pattern":"arn:aws:ivschat:[a-z0-9-]+:[0-9]+:room/[a-zA-Z0-9-]+" }, "RoomList":{ "type":"list", @@ -1127,7 +1128,7 @@ "type":"string", "max":128, "min":0, - "pattern":"^[a-zA-Z0-9-_]*$" + "pattern":"[a-zA-Z0-9-_]*" }, "RoomSummary":{ "type":"structure", @@ -1136,33 +1137,33 @@ "shape":"RoomArn", "documentation":"

Room ARN.

" }, - "createTime":{ - "shape":"Time", - "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"RoomID", "documentation":"

Room ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" }, - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

List of logging-configuration identifiers attached to the room.

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name. The value does not need to be unique.

" }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name. The value does not need to be unique.

" + "createTime":{ + "shape":"Time", + "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no constraints beyond what is documented there.

" }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

List of logging-configuration identifiers attached to the room.

" } }, "documentation":"

Summary information about a room.

" @@ -1181,21 +1182,21 @@ "SendEventRequest":{ "type":"structure", "required":[ - "eventName", - "roomIdentifier" + "roomIdentifier", + "eventName" ], "members":{ - "attributes":{ - "shape":"EventAttributes", - "documentation":"

Application-defined metadata to attach to the event sent to clients. The maximum length of the metadata is 1 KB total.

" + "roomIdentifier":{ + "shape":"RoomIdentifier", + "documentation":"

Identifier of the room to which the event will be sent. Currently this must be an ARN.

" }, "eventName":{ "shape":"EventName", "documentation":"

Application-defined name of the event to send to clients.

" }, - "roomIdentifier":{ - "shape":"RoomIdentifier", - "documentation":"

Identifier of the room to which the event will be sent. Currently this must be an ARN.

" + "attributes":{ + "shape":"EventAttributes", + "documentation":"

Application-defined metadata to attach to the event sent to clients. The maximum length of the metadata is 1 KB total.

" } } }, @@ -1211,16 +1212,12 @@ "ServiceQuotaExceededException":{ "type":"structure", "required":[ - "limit", "message", "resourceId", - "resourceType" + "resourceType", + "limit" ], "members":{ - "limit":{ - "shape":"Limit", - "documentation":"

" - }, "message":{"shape":"ErrorMessage"}, "resourceId":{ "shape":"ResourceId", @@ -1229,6 +1226,10 @@ "resourceType":{ "shape":"ResourceType", "documentation":"

" + }, + "limit":{ + "shape":"Limit", + "documentation":"

" } }, "documentation":"

", @@ -1295,16 +1296,12 @@ "ThrottlingException":{ "type":"structure", "required":[ - "limit", "message", "resourceId", - "resourceType" + "resourceType", + "limit" ], "members":{ - "limit":{ - "shape":"Limit", - "documentation":"

" - }, "message":{"shape":"ErrorMessage"}, "resourceId":{ "shape":"ResourceId", @@ -1313,6 +1310,10 @@ "resourceType":{ "shape":"ResourceType", "documentation":"

" + }, + "limit":{ + "shape":"Limit", + "documentation":"

" } }, "documentation":"

", @@ -1356,10 +1357,6 @@ "type":"structure", "required":["identifier"], "members":{ - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There can be only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" - }, "identifier":{ "shape":"LoggingConfigurationIdentifier", "documentation":"

Identifier of the logging configuration to be updated.

" @@ -1367,6 +1364,10 @@ "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name. The value does not need to be unique.

" + }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged. There can be only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" } } }, @@ -1377,22 +1378,26 @@ "shape":"LoggingConfigurationArn", "documentation":"

Logging-configuration ARN, from the request (if identifier was an ARN).

" }, + "id":{ + "shape":"LoggingConfigurationID", + "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the logging configuration.

" + }, "createTime":{ "shape":"Time", "documentation":"

Time when the logging configuration was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, - "destinationConfiguration":{ - "shape":"DestinationConfiguration", - "documentation":"

A complex type that contains a destination configuration for where chat content will be logged, from the request. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" - }, - "id":{ - "shape":"LoggingConfigurationID", - "documentation":"

Logging-configuration ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "name":{ "shape":"LoggingConfigurationName", "documentation":"

Logging-configuration name, from the request (if specified).

" }, + "destinationConfiguration":{ + "shape":"DestinationConfiguration", + "documentation":"

A complex type that contains a destination configuration for where chat content will be logged, from the request. There is only one type of destination (cloudWatchLogs, firehose, or s3) in a destinationConfiguration.

" + }, "state":{ "shape":"UpdateLoggingConfigurationState", "documentation":"

The state of the logging configuration. When the state is ACTIVE, the configuration is ready to log chat content.

" @@ -1400,10 +1405,6 @@ "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value).

" - }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the logging configuration’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" } } }, @@ -1419,25 +1420,25 @@ "shape":"RoomIdentifier", "documentation":"

Identifier of the room to be updated. Currently this must be an ARN.

" }, - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

Array of logging-configuration identifiers attached to the room.

" - }, - "maximumMessageLength":{ - "shape":"RoomMaxMessageLength", - "documentation":"

The maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name. The value does not need to be unique.

" }, "maximumMessageRatePerSecond":{ "shape":"RoomMaxMessageRatePerSecond", "documentation":"

Maximum number of messages per second that can be sent to the room (by all clients). Default: 10.

" }, + "maximumMessageLength":{ + "shape":"RoomMaxMessageLength", + "documentation":"

The maximum number of characters in a single message. Messages are expected to be UTF-8 encoded and this limit applies specifically to rune/code-point count, not number of bytes. Default: 500.

" + }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages. Specify an empty uri string to disassociate a message review handler from the specified room.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name. The value does not need to be unique.

" + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

Array of logging-configuration identifiers attached to the room.

" } } }, @@ -1448,41 +1449,41 @@ "shape":"RoomArn", "documentation":"

Room ARN, from the request (if identifier was an ARN).

" }, - "createTime":{ - "shape":"Time", - "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" - }, "id":{ "shape":"RoomID", "documentation":"

Room ID, generated by the system. This is a relative identifier, the part of the ARN that uniquely identifies the room.

" }, - "loggingConfigurationIdentifiers":{ - "shape":"LoggingConfigurationIdentifierList", - "documentation":"

Array of logging configurations attached to the room, from the request (if specified).

" + "name":{ + "shape":"RoomName", + "documentation":"

Room name, from the request (if specified).

" }, - "maximumMessageLength":{ - "shape":"RoomMaxMessageLength", - "documentation":"

Maximum number of characters in a single message, from the request (if specified).

" + "createTime":{ + "shape":"Time", + "documentation":"

Time when the room was created. This is an ISO 8601 timestamp; note that this is returned as a string.

" + }, + "updateTime":{ + "shape":"Time", + "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" }, "maximumMessageRatePerSecond":{ "shape":"RoomMaxMessageRatePerSecond", "documentation":"

Maximum number of messages per second that can be sent to the room (by all clients), from the request (if specified).

" }, + "maximumMessageLength":{ + "shape":"RoomMaxMessageLength", + "documentation":"

Maximum number of characters in a single message, from the request (if specified).

" + }, "messageReviewHandler":{ "shape":"MessageReviewHandler", "documentation":"

Configuration information for optional review of messages.

" }, - "name":{ - "shape":"RoomName", - "documentation":"

Room name, from the request (if specified).

" - }, "tags":{ "shape":"Tags", "documentation":"

Tags attached to the resource. Array of maps, each of the form string:string (key:value).

" }, - "updateTime":{ - "shape":"Time", - "documentation":"

Time of the room’s last update. This is an ISO 8601 timestamp; note that this is returned as a string.

" + "loggingConfigurationIdentifiers":{ + "shape":"LoggingConfigurationIdentifierList", + "documentation":"

Array of logging configurations attached to the room, from the request (if specified).

" } } }, @@ -1499,14 +1500,14 @@ "reason" ], "members":{ - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

" - }, "message":{"shape":"ErrorMessage"}, "reason":{ "shape":"ValidationExceptionReason", "documentation":"

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

" } }, "documentation":"

", @@ -1519,17 +1520,17 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "message", - "name" + "name", + "message" ], "members":{ - "message":{ - "shape":"ErrorMessage", - "documentation":"

Explanation of the reason for the validation error.

" - }, "name":{ "shape":"FieldName", "documentation":"

Name of the field which failed validation.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

Explanation of the reason for the validation error.

" } }, "documentation":"

This object is used in the ValidationException error.

" @@ -1547,5 +1548,5 @@ ] } }, - "documentation":"

Introduction

The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time.

The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference.

Notes on terminology:

  • You create service applications using the Amazon IVS Chat API. We refer to these as applications.

  • You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients.

Key Concepts

  • LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room.

  • Room — The central Amazon IVS Chat resource through which clients connect to and exchange chat messages.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room.

At most 50 tags can be applied to a resource.

API Access Security

Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts:

  • Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity.

  • Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests.

Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user’s chat session, passing identity and authorization information about the user.

Signing API Requests

HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it’s your responsibility to sign the requests.

You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

Messaging Endpoints

  • DeleteMessage — Sends an event to a specific room which directs clients to delete a specific message; that is, unrender it from view and delete it from the client’s chat history. This event’s EventName is aws:DELETE_MESSAGE. This replicates the DeleteMessage WebSocket operation in the Amazon IVS Chat Messaging API.

  • DisconnectUser — Disconnects all connections using a specified user ID from a room. This replicates the DisconnectUser WebSocket operation in the Amazon IVS Chat Messaging API.

  • SendEvent — Sends an event to a room. Use this within your application’s business logic to send events to clients of a room; e.g., to notify clients to change the way the chat UI is rendered.

Chat Token Endpoint

  • CreateChatToken — Creates an encrypted token that is used by a chat participant to establish an individual WebSocket chat connection to a room. When the token is used to connect to chat, the connection is valid for the session duration specified in the request. The token becomes invalid at the token-expiration timestamp included in the response.

Room Endpoints

  • CreateRoom — Creates a room that allows clients to connect and pass messages.

  • DeleteRoom — Deletes the specified room.

  • GetRoom — Gets the specified room.

  • ListRooms — Gets summary information about all your rooms in the AWS region where the API request is processed.

  • UpdateRoom — Updates a room’s configuration.

Logging Configuration Endpoints

Tags Endpoints

  • ListTagsForResource — Gets information about AWS tags for the specified ARN.

  • TagResource — Adds or updates tags for the AWS resource with the specified ARN.

  • UntagResource — Removes tags from the resource with the specified ARN.

All the above are HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference.

" + "documentation":"

Introduction

The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time.

The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference.

This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference.

Notes on terminology:

  • You create service applications using the Amazon IVS Chat API. We refer to these as applications.

  • You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients.

Resources

The following resources are part of Amazon IVS Chat:

  • LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information.

  • Room — The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room.

At most 50 tags can be applied to a resource.

API Access Security

Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts:

  • Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity.

  • Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests.

Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user’s chat session, passing identity and authorization information about the user.

Signing API Requests

HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it’s your responsibility to sign the requests.

You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

" } diff --git a/botocore/data/ivschat/2020-07-14/waiters-2.json b/botocore/data/ivschat/2020-07-14/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/ivschat/2020-07-14/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/kafka/2018-11-14/service-2.json b/botocore/data/kafka/2018-11-14/service-2.json index e71e97c939..8c09d49249 100644 --- a/botocore/data/kafka/2018-11-14/service-2.json +++ b/botocore/data/kafka/2018-11-14/service-2.json @@ -2104,6 +2104,22 @@ "DEFAULT" ] }, + "BrokerCountUpdateInfo" : { + "type" : "structure", + "members" : { + "CreatedBrokerIds" : { + "shape" : "__listOf__double", + "locationName" : "createdBrokerIds", + "documentation": "\n

Kafka Broker IDs of brokers being created.

\n " + }, + "DeletedBrokerIds" : { + "shape" : "__listOf__double", + "locationName" : "deletedBrokerIds", + "documentation": "\n

Kafka Broker IDs of brokers being deleted.

\n " + } + }, + "documentation": "\n

Information regarding UpdateBrokerCount.

\n " + }, "BrokerEBSVolumeInfo": { "type": "structure", "members": { @@ -3553,6 +3569,17 @@ }, "documentation" : "\n

Returns information about a cluster operation.

" }, + "ControllerNodeInfo": { + "type": "structure", + "members": { + "Endpoints": { + "shape": "__listOf__string", + "locationName": "endpoints", + "documentation": "\n

Endpoints for accessing the Controller.

\n " + } + }, + "documentation": "\n

Controller node information.

\n " + }, "CustomerActionStatus": { "type": "string", "documentation": "\n

A type of an action required from the customer.

", @@ -5030,6 +5057,11 @@ "shape" : "StorageMode", "locationName" : "storageMode", "documentation" : "\n

This controls storage mode for supported storage tiers.

\n " + }, + "BrokerCountUpdateInfo" : { + "shape": "BrokerCountUpdateInfo", + "locationName" : "brokerCountUpdateInfo", + "documentation" : "\n

Describes brokers being changed during a broker count update.

\n " } }, "documentation": "\n

Information about cluster attributes that can be updated via update APIs.

\n " @@ -5352,6 +5384,11 @@ "locationName": "brokerNodeInfo", "documentation": "\n

The broker node info.

\n " }, + "ControllerNodeInfo": { + "shape": "ControllerNodeInfo", + "locationName": "controllerNodeInfo", + "documentation": "\n

The ControllerNodeInfo.

\n " + }, "InstanceType": { "shape": "__string", "locationName": "instanceType", @@ -6591,6 +6628,12 @@ "shape" : "UnprocessedScramSecret" } }, + "__listOf__double" : { + "type" : "list", + "member" : { + "shape" : "__double" + } + }, "__listOf__string": { "type": "list", "member": { diff --git a/botocore/data/kinesis-video-webrtc-storage/2018-05-10/endpoint-rule-set-1.json b/botocore/data/kinesis-video-webrtc-storage/2018-05-10/endpoint-rule-set-1.json index f61682a8f1..d6abb57a2f 100644 --- a/botocore/data/kinesis-video-webrtc-storage/2018-05-10/endpoint-rule-set-1.json +++ b/botocore/data/kinesis-video-webrtc-storage/2018-05-10/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": 
"endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff --git a/botocore/data/kinesis-video-webrtc-storage/2018-05-10/service-2.json b/botocore/data/kinesis-video-webrtc-storage/2018-05-10/service-2.json index 0799b85e9b..db6f10b9f5 100644 --- a/botocore/data/kinesis-video-webrtc-storage/2018-05-10/service-2.json +++ b/botocore/data/kinesis-video-webrtc-storage/2018-05-10/service-2.json @@ -26,7 +26,23 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing device for an input channel. If there’s no existing session for the channel, a new streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must be provided.

Currently for the SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream, while viewers can only ingest audio. Both a video producing device and viewers can join the session first, and wait for other participants.

While participants are having peer to peer conversations through webRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple viewers are able to playback real-time media.

Customers can also use existing Kinesis Video Streams features like HLS or DASH playback, Image generation, and more with ingested WebRTC media.

Assume that only one video producing device client can be associated with a session for the channel. If more than one client joins the session of a specific channel as a video producing device, the most recent client request takes precedence.

" + "documentation":"

Before using this API, you must call the GetSignalingChannelEndpoint API to request the WEBRTC endpoint. You then specify the endpoint and region in your JoinStorageSession API request.

Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing device for an input channel. If there’s no existing session for the channel, a new streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling channel must be provided.

Currently for the SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream. Only video producing devices can join the session and record media.

Both audio and video tracks are currently required for WebRTC ingestion.

Current requirements:

  • Video track: H.264

  • Audio track: Opus

The resulting ingested video in the Kinesis video stream will have the following parameters: H.264 video and AAC audio.

Once a master participant has negotiated a connection through WebRTC, the ingested media session will be stored in the Kinesis video stream. Multiple viewers are then able to play back real-time media through our Playback APIs.

You can also use existing Kinesis Video Streams features like HLS or DASH playback, image generation via GetImages, and more with ingested WebRTC media.

S3 image delivery and notifications are not currently supported.

Assume that only one video producing device client can be associated with a session for the channel. If more than one client joins the session of a specific channel as a video producing device, the most recent client request takes precedence.

Additional information

  • Idempotent - This API is not idempotent.

  • Retry behavior - This is counted as a new API call.

  • Concurrent calls - Concurrent calls are allowed. An offer is sent once per each call.

" + }, + "JoinStorageSessionAsViewer":{ + "name":"JoinStorageSessionAsViewer", + "http":{ + "method":"POST", + "requestUri":"/joinStorageSessionAsViewer", + "responseCode":200 + }, + "input":{"shape":"JoinStorageSessionAsViewerInput"}, + "errors":[ + {"shape":"ClientLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Join the ongoing one way-video and/or multi-way audio WebRTC session as a viewer for an input channel. If there’s no existing session for the channel, create a new streaming session and provide the Amazon Resource Name (ARN) of the signaling channel (channelArn) and client id (clientId).

Currently for SINGLE_MASTER type, a video producing device is able to ingest both audio and video media into a stream, while viewers can only ingest audio. Both a video producing device and viewers can join a session first and wait for other participants. While participants are having peer to peer conversations through WebRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple viewers are able to play back real-time media.

Customers can also use existing Kinesis Video Streams features like HLS or DASH playback, image generation, and more with ingested WebRTC media. If there’s an existing session with the same clientId that's found in the join session request, the new request takes precedence.

" } }, "shapes":{ @@ -46,6 +62,12 @@ "type":"string", "pattern":"^arn:(aws[a-zA-Z-]*):kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+$" }, + "ClientId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, "ClientLimitExceededException":{ "type":"structure", "members":{ @@ -70,6 +92,23 @@ }, "exception":true }, + "JoinStorageSessionAsViewerInput":{ + "type":"structure", + "required":[ + "channelArn", + "clientId" + ], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel.

" + }, + "clientId":{ + "shape":"ClientId", + "documentation":"

The unique identifier for the sender client.

" + } + } + }, "JoinStorageSessionInput":{ "type":"structure", "required":["channelArn"], @@ -94,5 +133,5 @@ }, "String":{"type":"string"} }, - "documentation":"

" + "documentation":"

webrtc

" } diff --git a/botocore/data/kinesis/2013-12-02/service-2.json b/botocore/data/kinesis/2013-12-02/service-2.json index 54b26deea0..93d247243d 100644 --- a/botocore/data/kinesis/2013-12-02/service-2.json +++ b/botocore/data/kinesis/2013-12-02/service-2.json @@ -6,12 +6,14 @@ "jsonVersion":"1.1", "protocol":"json", "protocolSettings":{"h2":"eventstream"}, + "protocols":["json"], "serviceAbbreviation":"Kinesis", "serviceFullName":"Amazon Kinesis", "serviceId":"Kinesis", "signatureVersion":"v4", "targetPrefix":"Kinesis_20131202", - "uid":"kinesis-2013-12-02" + "uid":"kinesis-2013-12-02", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToStream":{ diff --git a/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json b/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json index 70052cd36c..eb315fdd2d 100644 --- a/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json +++ b/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json @@ -11,6 +11,18 @@ "limit_key": "Limit", "output_token": "NextToken", "result_key": "ApplicationSummaries" + }, + "ListApplicationOperations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "ApplicationOperationInfoList" + }, + "ListApplicationVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "ApplicationVersionSummaries" } } } diff --git a/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json b/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json index 6a24d56dc0..c365d3614f 100644 --- a/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json +++ b/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"kinesisanalytics", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Kinesis Analytics V2", "serviceFullName":"Amazon Kinesis Analytics", "serviceId":"Kinesis Analytics V2", "signatureVersion":"v4", 
"signingName":"kinesisanalytics", "targetPrefix":"KinesisAnalytics_20180523", - "uid":"kinesisanalyticsv2-2018-05-23" + "uid":"kinesisanalyticsv2-2018-05-23", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddApplicationCloudWatchLoggingOption":{ @@ -309,6 +311,21 @@ ], "documentation":"

Returns information about a specific Managed Service for Apache Flink application.

If you want to retrieve a list of all applications in your account, use the ListApplications operation.

" }, + "DescribeApplicationOperation":{ + "name":"DescribeApplicationOperation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationOperationRequest"}, + "output":{"shape":"DescribeApplicationOperationResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Returns information about a specific operation performed on a Managed Service for Apache Flink application" + }, "DescribeApplicationSnapshot":{ "name":"DescribeApplicationSnapshot", "http":{ @@ -357,6 +374,21 @@ ], "documentation":"

Infers a schema for a SQL-based Kinesis Data Analytics application by evaluating sample records on the specified streaming source (Kinesis data stream or Kinesis Data Firehose delivery stream) or Amazon S3 object. In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.

You can use the inferred schema when configuring a streaming source for your application. When you create an application using the Kinesis Data Analytics console, the console uses this operation to infer a schema and show it in the console user interface.

" }, + "ListApplicationOperations":{ + "name":"ListApplicationOperations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationOperationsRequest"}, + "output":{"shape":"ListApplicationOperationsResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Lists information about operations performed on a Managed Service for Apache Flink application" + }, "ListApplicationSnapshots":{ "name":"ListApplicationSnapshots", "http":{ @@ -430,7 +462,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status.

You can roll back an application only if it is in the UPDATING or AUTOSCALING status.

When you rollback an application, it loads state data from the last successful snapshot. If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request.

This action is not supported for Managed Service for Apache Flink for SQL applications.

" + "documentation":"

Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status or in the running status.

You can roll back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses.

When you roll back an application, it loads state data from the last successful snapshot. If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request.

" }, "StartApplication":{ "name":"StartApplication", @@ -579,6 +611,10 @@ "CloudWatchLoggingOptionDescriptions":{ "shape":"CloudWatchLoggingOptionDescriptions", "documentation":"

The descriptions of the current CloudWatch logging options for the SQL-based Kinesis Data Analytics application.

" + }, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking AddApplicationCloudWatchLoggingOption request" } } }, @@ -786,6 +822,10 @@ "VpcConfigurationDescription":{ "shape":"VpcConfigurationDescription", "documentation":"

The parameters of the new VPC configuration.

" + }, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking AddApplicationVpcConfiguration request" } } }, @@ -856,6 +896,7 @@ "shape":"ApplicationSnapshotConfiguration", "documentation":"

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" }, + "ApplicationSystemRollbackConfiguration":{"shape":"ApplicationSystemRollbackConfiguration"}, "VpcConfigurations":{ "shape":"VpcConfigurations", "documentation":"

The array of descriptions of VPC configurations available to the application.

" @@ -894,6 +935,7 @@ "shape":"ApplicationSnapshotConfigurationDescription", "documentation":"

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" }, + "ApplicationSystemRollbackConfigurationDescription":{"shape":"ApplicationSystemRollbackConfigurationDescription"}, "VpcConfigurationDescriptions":{ "shape":"VpcConfigurationDescriptions", "documentation":"

The array of descriptions of VPC configurations available to the application.

" @@ -928,6 +970,7 @@ "shape":"ApplicationSnapshotConfigurationUpdate", "documentation":"

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" }, + "ApplicationSystemRollbackConfigurationUpdate":{"shape":"ApplicationSystemRollbackConfigurationUpdate"}, "VpcConfigurationUpdates":{ "shape":"VpcConfigurationUpdates", "documentation":"

Updates to the array of descriptions of VPC configurations available to the application.

" @@ -1010,6 +1053,10 @@ "shape":"ApplicationVersionId", "documentation":"

If you reverted the application using RollbackApplication, the application version when RollbackApplication was called.

" }, + "ApplicationVersionCreateTimestamp":{ + "shape":"Timestamp", + "documentation":"The current timestamp when the application version was created." + }, "ConditionalToken":{ "shape":"ConditionalToken", "documentation":"

A value you use to implement strong concurrency for application updates.

" @@ -1075,10 +1122,57 @@ }, "ApplicationName":{ "type":"string", + "documentation":"The name of the application", "max":128, "min":1, "pattern":"[a-zA-Z0-9_.-]+" }, + "ApplicationOperationInfo":{ + "type":"structure", + "members":{ + "Operation":{"shape":"Operation"}, + "OperationId":{"shape":"OperationId"}, + "StartTime":{ + "shape":"Timestamp", + "documentation":"The timestamp at which the operation was created" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"The timestamp at which the operation finished for the application" + }, + "OperationStatus":{"shape":"OperationStatus"} + }, + "documentation":"Provides a description of the operation, such as the type and status of operation" + }, + "ApplicationOperationInfoDetails":{ + "type":"structure", + "required":[ + "Operation", + "StartTime", + "EndTime", + "OperationStatus" + ], + "members":{ + "Operation":{"shape":"Operation"}, + "StartTime":{ + "shape":"Timestamp", + "documentation":"The timestamp at which the operation was created" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"The timestamp at which the operation finished for the application" + }, + "OperationStatus":{"shape":"OperationStatus"}, + "ApplicationVersionChangeDetails":{"shape":"ApplicationVersionChangeDetails"}, + "OperationFailureDetails":{"shape":"OperationFailureDetails"} + }, + "documentation":"Provides a description of the operation, such as the operation-type and status" + }, + "ApplicationOperationInfoList":{ + "type":"list", + "member":{"shape":"ApplicationOperationInfo"}, + "documentation":"List of ApplicationOperationInfo for an application" + }, "ApplicationRestoreConfiguration":{ "type":"structure", "required":["ApplicationRestoreType"], @@ -1192,6 +1286,57 @@ }, "documentation":"

Provides application summary information, including the application Amazon Resource Name (ARN), name, and status.

" }, + "ApplicationSystemRollbackConfiguration":{ + "type":"structure", + "required":["RollbackEnabled"], + "members":{ + "RollbackEnabled":{ + "shape":"BooleanObject", + "documentation":"Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application" + } + }, + "documentation":"Describes system rollback configuration for a Managed Service for Apache Flink application" + }, + "ApplicationSystemRollbackConfigurationDescription":{ + "type":"structure", + "required":["RollbackEnabled"], + "members":{ + "RollbackEnabled":{ + "shape":"BooleanObject", + "documentation":"Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application" + } + }, + "documentation":"Describes system rollback configuration for a Managed Service for Apache Flink application" + }, + "ApplicationSystemRollbackConfigurationUpdate":{ + "type":"structure", + "required":["RollbackEnabledUpdate"], + "members":{ + "RollbackEnabledUpdate":{ + "shape":"BooleanObject", + "documentation":"Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application" + } + }, + "documentation":"Describes system rollback configuration for a Managed Service for Apache Flink application" + }, + "ApplicationVersionChangeDetails":{ + "type":"structure", + "required":[ + "ApplicationVersionUpdatedFrom", + "ApplicationVersionUpdatedTo" + ], + "members":{ + "ApplicationVersionUpdatedFrom":{ + "shape":"ApplicationVersionId", + "documentation":"The operation was performed on this version of the application" + }, + "ApplicationVersionUpdatedTo":{ + "shape":"ApplicationVersionId", + "documentation":"The operation execution resulted in the transition to the following version of the application" + } + }, + "documentation":"Contains information about the application version changes due to an operation" + }, "ApplicationVersionId":{ "type":"long", "max":999999999, @@ -1313,10 +1458,10 @@ }, "MinPauseBetweenCheckpoints":{ 
"shape":"MinPauseBetweenCheckpoints", - "documentation":"

Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the CheckpointInterval, the application otherwise performs continual checkpoint operations. For more information, see Tuning Checkpointing in the Apache Flink Documentation.

If CheckpointConfiguration.ConfigurationType is DEFAULT, the application will use a MinPauseBetweenCheckpoints value of 5000, even if this value is set using this API or in application code.

" + "documentation":"

Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the CheckpointInterval, the application otherwise performs continual checkpoint operations. For more information, see Tuning Checkpointing in the Apache Flink Documentation.

If CheckpointConfiguration.ConfigurationType is DEFAULT, the application will use a MinPauseBetweenCheckpoints value of 5000, even if this value is set using this API or in application code.

" } }, - "documentation":"

Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation.

" + "documentation":"

Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation.

" }, "CheckpointConfigurationDescription":{ "type":"structure", @@ -1717,6 +1862,10 @@ "CloudWatchLoggingOptionDescriptions":{ "shape":"CloudWatchLoggingOptionDescriptions", "documentation":"

The descriptions of the remaining CloudWatch logging options for the application.

" + }, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking DeleteApplicationCloudWatchLoggingOption request" } } }, @@ -1909,6 +2058,10 @@ "ApplicationVersionId":{ "shape":"ApplicationVersionId", "documentation":"

The updated version ID of the application.

" + }, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking DeleteApplicationVpcConfiguration request" } } }, @@ -1944,6 +2097,25 @@ }, "documentation":"

Updates to the configuration information required to deploy an Amazon Data Analytics Studio notebook as an application with durable state.

" }, + "DescribeApplicationOperationRequest":{ + "type":"structure", + "required":[ + "ApplicationName", + "OperationId" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "OperationId":{"shape":"OperationId"} + }, + "documentation":"Request for information about a specific operation performed on a Managed Service for Apache Flink application" + }, + "DescribeApplicationOperationResponse":{ + "type":"structure", + "members":{ + "ApplicationOperationInfoDetails":{"shape":"ApplicationOperationInfoDetails"} + }, + "documentation":"Provides details of the operation corresponding to the operation-ID on a Managed Service for Apache Flink application" + }, "DescribeApplicationRequest":{ "type":"structure", "required":["ApplicationName"], @@ -2108,7 +2280,20 @@ }, "documentation":"

Describes updates to the execution property groups for a Managed Service for Apache Flink application or a Studio notebook.

" }, + "ErrorInfo":{ + "type":"structure", + "members":{ + "ErrorString":{"shape":"ErrorString"} + }, + "documentation":"Provides a description of the operation failure error" + }, "ErrorMessage":{"type":"string"}, + "ErrorString":{ + "type":"string", + "documentation":"Error message resulting in failure of the operation", + "max":512, + "min":1 + }, "FileKey":{ "type":"string", "max":1024, @@ -2119,7 +2304,7 @@ "members":{ "CheckpointConfiguration":{ "shape":"CheckpointConfiguration", - "documentation":"

Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation.

" + "documentation":"

Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation.

" }, "MonitoringConfiguration":{ "shape":"MonitoringConfiguration", @@ -2149,7 +2334,7 @@ }, "JobPlanDescription":{ "shape":"JobPlanDescription", - "documentation":"

The job plan for an application. For more information about the job plan, see Jobs and Scheduling in the Apache Flink Documentation. To retrieve the job plan for the application, use the DescribeApplicationRequest$IncludeAdditionalDetails parameter of the DescribeApplication operation.

" + "documentation":"

The job plan for an application. For more information about the job plan, see Jobs and Scheduling in the Apache Flink Documentation. To retrieve the job plan for the application, use the DescribeApplicationRequest$IncludeAdditionalDetails parameter of the DescribeApplication operation.

" } }, "documentation":"

Describes configuration parameters for a Managed Service for Apache Flink application.

" @@ -2177,7 +2362,7 @@ "members":{ "AllowNonRestoredState":{ "shape":"BooleanObject", - "documentation":"

When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see Allowing Non-Restored State in the Apache Flink documentation.

This value defaults to false. If you update your application without specifying this parameter, AllowNonRestoredState will be set to false, even if it was previously set to true.

" + "documentation":"

When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see Allowing Non-Restored State in the Apache Flink documentation.

This value defaults to false. If you update your application without specifying this parameter, AllowNonRestoredState will be set to false, even if it was previously set to true.

" } }, "documentation":"

Describes the starting parameters for a Managed Service for Apache Flink application.

" @@ -2725,6 +2910,32 @@ "documentation":"

The number of allowed resources has been exceeded.

", "exception":true }, + "ListApplicationOperationsInputLimit":{ + "type":"integer", + "documentation":"Limit on the number of records returned in the response", + "max":50, + "min":1 + }, + "ListApplicationOperationsRequest":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Limit":{"shape":"ListApplicationOperationsInputLimit"}, + "NextToken":{"shape":"NextToken"}, + "Operation":{"shape":"Operation"}, + "OperationStatus":{"shape":"OperationStatus"} + }, + "documentation":"Request to list operations performed on an application" + }, + "ListApplicationOperationsResponse":{ + "type":"structure", + "members":{ + "ApplicationOperationInfoList":{"shape":"ApplicationOperationInfoList"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"Response with the list of operations for an application" + }, "ListApplicationSnapshotsRequest":{ "type":"structure", "required":["ApplicationName"], @@ -2988,6 +3199,7 @@ }, "NextToken":{ "type":"string", + "documentation":"If a previous command returned a pagination token, pass it into this value to retrieve the next set of results", "max":512, "min":1 }, @@ -2996,6 +3208,39 @@ "max":1024, "min":0 }, + "Operation":{ + "type":"string", + "documentation":"Type of operation performed on an application", + "max":64, + "min":1 + }, + "OperationFailureDetails":{ + "type":"structure", + "members":{ + "RollbackOperationId":{ + "shape":"OperationId", + "documentation":"Provides the operation ID of a system-rollback operation executed due to failure in the current operation" + }, + "ErrorInfo":{"shape":"ErrorInfo"} + }, + "documentation":"Provides a description of the operation failure" + }, + "OperationId":{ + "type":"string", + "documentation":"Identifier of the Operation", + "max":64, + "min":1 + }, + "OperationStatus":{ + "type":"string", + "documentation":"Status of the operation performed on an application", + "enum":[ + "IN_PROGRESS", + "CANCELLED", + 
"SUCCESSFUL", + "FAILED" + ] + }, "Output":{ "type":"structure", "required":[ @@ -3124,7 +3369,7 @@ "documentation":"

Describes whether the Managed Service for Apache Flink service can increase the parallelism of the application in response to increased throughput.

" } }, - "documentation":"

Describes parameters for how a Managed Service for Apache Flink application executes multiple tasks simultaneously. For more information about parallelism, see Parallel Execution in the Apache Flink Documentation.

" + "documentation":"

Describes parameters for how a Managed Service for Apache Flink application executes multiple tasks simultaneously. For more information about parallelism, see Parallel Execution in the Apache Flink Documentation.

" }, "ParallelismConfigurationDescription":{ "type":"structure", @@ -3466,7 +3711,11 @@ "type":"structure", "required":["ApplicationDetail"], "members":{ - "ApplicationDetail":{"shape":"ApplicationDetail"} + "ApplicationDetail":{"shape":"ApplicationDetail"}, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking RollbackApplication request" + } } }, "RunConfiguration":{ @@ -3524,7 +3773,8 @@ "ZEPPELIN-FLINK-2_0", "FLINK-1_15", "ZEPPELIN-FLINK-3_0", - "FLINK-1_18" + "FLINK-1_18", + "FLINK-1_19" ] }, "S3ApplicationCodeLocationDescription":{ @@ -3887,6 +4137,10 @@ "StartApplicationResponse":{ "type":"structure", "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking StartApplication request" + } } }, "StopApplicationRequest":{ @@ -3906,6 +4160,10 @@ "StopApplicationResponse":{ "type":"structure", "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking StopApplication request" + } } }, "SubnetId":{"type":"string"}, @@ -4109,6 +4367,10 @@ "ApplicationDetail":{ "shape":"ApplicationDetail", "documentation":"

Describes application updates.

" + }, + "OperationId":{ + "shape":"OperationId", + "documentation":"Operation ID for tracking UpdateApplication request" } } }, diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index 06121c4b71..8e4c88b3ee 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"kms", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"KMS", "serviceFullName":"AWS Key Management Service", "serviceId":"KMS", "signatureVersion":"v4", "targetPrefix":"TrentService", - "uid":"kms-2014-11-01" + "uid":"kms-2014-11-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelKeyDeletion":{ @@ -138,7 +140,7 @@ {"shape":"XksKeyAlreadyInUseException"}, {"shape":"XksKeyNotFoundException"} ], - "documentation":"

Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" + "documentation":"

Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" }, "Decrypt":{ "name":"Decrypt", @@ -211,6 +213,27 @@ ], "documentation":"

Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DeleteImportedKeyMaterial (key policy)

Related operations:

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" }, + "DeriveSharedSecret":{ + "name":"DeriveSharedSecret", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeriveSharedSecretRequest"}, + "output":{"shape":"DeriveSharedSecretResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"InvalidKeyUsageException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"DryRunOperationException"} + ], + "documentation":"

Derives a shared secret using a key agreement algorithm.

You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key.

The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret.

  1. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT.

    The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

  2. Bob creates an elliptic curve key pair.

    Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice.

  3. Alice and Bob exchange their public keys through an insecure communication channel (like the internet).

    Use GetPublicKey to download the public key of your asymmetric KMS key pair.

    KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret.

  4. Alice calls DeriveSharedSecret.

    KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret.

  5. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key.

To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:DeriveSharedSecret (key policy)

Related operations:

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" + }, "DescribeCustomKeyStores":{ "name":"DescribeCustomKeyStores", "http":{ @@ -543,7 +566,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

  • KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.

  • KeyUsage: Whether the key is used for encryption or signing.

  • EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key.

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" + "documentation":"

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

  • KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.

  • KeyUsage: Whether the key is used for encryption, signing, or deriving a shared secret.

  • EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key.

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey

Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

" }, "ImportKeyMaterial":{ "name":"ImportKeyMaterial", @@ -1010,7 +1033,8 @@ "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "RSA_AES_KEY_WRAP_SHA_1", - "RSA_AES_KEY_WRAP_SHA_256" + "RSA_AES_KEY_WRAP_SHA_256", + "SM2PKE" ] }, "AliasList":{ @@ -1334,7 +1358,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

Select only one valid value.

  • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

  • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

  • For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

  • For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY.

  • For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY.

" + "documentation":"

Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

Select only one valid value.

  • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

  • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

  • For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

  • For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT.

  • For asymmetric KMS keys with ECC_SECG_P256K1 key pairs, specify SIGN_VERIFY.

  • For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

" }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -1344,7 +1368,7 @@ }, "KeySpec":{ "shape":"KeySpec", - "documentation":"

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

  • Symmetric encryption key (default)

    • SYMMETRIC_DEFAULT

  • HMAC keys (symmetric)

    • HMAC_224

    • HMAC_256

    • HMAC_384

    • HMAC_512

  • Asymmetric RSA key pairs

    • RSA_2048

    • RSA_3072

    • RSA_4096

  • Asymmetric NIST-recommended elliptic curve key pairs

    • ECC_NIST_P256 (secp256r1)

    • ECC_NIST_P384 (secp384r1)

    • ECC_NIST_P521 (secp521r1)

  • Other asymmetric elliptic curve key pairs

    • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

  • SM2 key pairs (China Regions only)

    • SM2

" + "documentation":"

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

  • Symmetric encryption key (default)

    • SYMMETRIC_DEFAULT

  • HMAC keys (symmetric)

    • HMAC_224

    • HMAC_256

    • HMAC_384

    • HMAC_512

  • Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

    • RSA_2048

    • RSA_3072

    • RSA_4096

  • Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

    • ECC_NIST_P256 (secp256r1)

    • ECC_NIST_P384 (secp384r1)

    • ECC_NIST_P521 (secp521r1)

  • Other asymmetric elliptic curve key pairs (signing and verification)

    • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

  • SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

    • SM2 (China Regions only)

" }, "Origin":{ "shape":"OriginType", @@ -1616,6 +1640,65 @@ "exception":true, "fault":true }, + "DeriveSharedSecretRequest":{ + "type":"structure", + "required":[ + "KeyId", + "KeyAgreementAlgorithm", + "PublicKey" + ], + "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only) KMS key. KMS uses the private key in the specified key pair to derive the shared secret. The key usage of the KMS key must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + }, + "KeyAgreementAlgorithm":{ + "shape":"KeyAgreementAlgorithmSpec", + "documentation":"

Specifies the key agreement algorithm used to derive the shared secret. The only valid value is ECDH.

" + }, + "PublicKey":{ + "shape":"PublicKeyType", + "documentation":"

Specifies the public key in your peer's NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key pair.

The public key must be a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280.

GetPublicKey returns the public key of an asymmetric KMS key pair in the required DER-encoded format.

If you use Amazon Web Services CLI version 1, you must provide the DER-encoded X.509 public key in a file. Otherwise, the Amazon Web Services CLI Base64-encodes the public key a second time, resulting in a ValidationException.

You can specify the public key as binary data in a file using fileb (fileb://<path-to-file>) or in-line using a Base64 encoded string.

" + }, + "GrantTokens":{ + "shape":"GrantTokenList", + "documentation":"

A list of grant tokens.

Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

" + }, + "DryRun":{ + "shape":"NullableBooleanType", + "documentation":"

Checks if your request will succeed. DryRun is an optional parameter.

To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

" + }, + "Recipient":{ + "shape":"RecipientInfo", + "documentation":"

A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256.

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation document for the enclave.

When you use this parameter, instead of returning a plaintext copy of the shared secret, KMS encrypts the plaintext shared secret under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains the encrypted shared secret derived from the KMS key specified by the KeyId parameter and public key specified by the PublicKey parameter. The SharedSecret field in the response is null or empty.

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + } + } + }, + "DeriveSharedSecretResponse":{ + "type":"structure", + "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

Identifies the KMS key used to derive the shared secret.

" + }, + "SharedSecret":{ + "shape":"PlaintextType", + "documentation":"

The raw secret derived from the specified key agreement algorithm, private key in the asymmetric KMS key, and your peer's public key.

If the response includes the CiphertextForRecipient field, the SharedSecret field is null or empty.

" + }, + "CiphertextForRecipient":{ + "shape":"CiphertextType", + "documentation":"

The plaintext shared secret encrypted with the public key in the attestation document.

This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + }, + "KeyAgreementAlgorithm":{ + "shape":"KeyAgreementAlgorithmSpec", + "documentation":"

Identifies the key agreement algorithm used to derive the shared secret.

" + }, + "KeyOrigin":{ + "shape":"OriginType", + "documentation":"

The source of the key material for the specified KMS key.

When this value is AWS_KMS, KMS created the key material. When this value is EXTERNAL, the key material was imported or the KMS key doesn't have any key material.

The only valid values for DeriveSharedSecret are AWS_KMS and EXTERNAL. DeriveSharedSecret does not support KMS keys with a KeyOrigin value of AWS_CLOUDHSM or EXTERNAL_KEY_STORE.

" + } + } + }, "DescribeCustomKeyStoresRequest":{ "type":"structure", "members":{ @@ -1868,7 +1951,7 @@ }, "Recipient":{ "shape":"RecipientInfo", - "documentation":"

A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256.

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK.

When you use this parameter, instead of returning a plaintext copy of the private data key, KMS encrypts the plaintext private data key under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains a copy of the private data key encrypted under the KMS key specified by the KeyId parameter. The PrivateKeyPlaintext field in the response is null or empty.

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + "documentation":"

A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256.

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation document for the enclave.

When you use this parameter, instead of returning a plaintext copy of the private data key, KMS encrypts the plaintext private data key under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains a copy of the private data key encrypted under the KMS key specified by the KeyId parameter. The PrivateKeyPlaintext field in the response is null or empty.

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" }, "DryRun":{ "shape":"NullableBooleanType", @@ -2272,7 +2355,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or SIGN_VERIFY.

This information is critical. If a public key with SIGN_VERIFY key usage encrypts data outside of KMS, the ciphertext cannot be decrypted.

" + "documentation":"

The permitted use of the public key. Valid values for asymmetric key pairs are ENCRYPT_DECRYPT, SIGN_VERIFY, and KEY_AGREEMENT.

This information is critical. For example, if a public key with SIGN_VERIFY key usage encrypts data outside of KMS, the ciphertext cannot be decrypted.

" }, "EncryptionAlgorithms":{ "shape":"EncryptionAlgorithmSpecList", @@ -2281,6 +2364,10 @@ "SigningAlgorithms":{ "shape":"SigningAlgorithmSpecList", "documentation":"

The signing algorithms that KMS supports for this key.

This field appears in the response only when the KeyUsage of the public key is SIGN_VERIFY.

" + }, + "KeyAgreementAlgorithms":{ + "shape":"KeyAgreementAlgorithmSpecList", + "documentation":"

The key agreement algorithm used to derive a shared secret. This field is present only when the KMS key has a KeyUsage value of KEY_AGREEMENT.

" } } }, @@ -2373,7 +2460,8 @@ "GenerateDataKeyPair", "GenerateDataKeyPairWithoutPlaintext", "GenerateMac", - "VerifyMac" + "VerifyMac", + "DeriveSharedSecret" ] }, "GrantOperationList":{ @@ -2503,7 +2591,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected for one of the following reasons:

  • The KeyUsage value of the KMS key is incompatible with the API operation.

  • The encryption algorithm or signing algorithm specified for the operation is incompatible with the type of key material in the KMS key (KeySpec).

For encrypting, decrypting, re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the DescribeKey operation.

To find the encryption or signing algorithms supported for a particular KMS key, use the DescribeKey operation.

", + "documentation":"

The request was rejected for one of the following reasons:

  • The KeyUsage value of the KMS key is incompatible with the API operation.

  • The encryption algorithm or signing algorithm specified for the operation is incompatible with the type of key material in the KMS key (KeySpec).

For encrypting, decrypting, re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the DescribeKey operation.

To find the encryption or signing algorithms supported for a particular KMS key, use the DescribeKey operation.

", "exception":true }, "InvalidMarkerException":{ @@ -2547,6 +2635,14 @@ "documentation":"

The request was rejected because the state of the specified resource is not valid for this request.

This exception means one of the following:

  • The key state of the KMS key is not compatible with the operation.

    To find the key state, use the DescribeKey operation. For more information about which key states are compatible with each KMS operation, see Key states of KMS keys in the Key Management Service Developer Guide .

  • For cryptographic operations on KMS keys in custom key stores, this exception represents a general failure with many possible causes. To identify the cause, see the error message that accompanies the exception.

", "exception":true }, + "KeyAgreementAlgorithmSpec":{ + "type":"string", + "enum":["ECDH"] + }, + "KeyAgreementAlgorithmSpecList":{ + "type":"list", + "member":{"shape":"KeyAgreementAlgorithmSpec"} + }, "KeyEncryptionMechanism":{ "type":"string", "enum":["RSAES_OAEP_SHA_256"] @@ -2663,6 +2759,10 @@ "shape":"SigningAlgorithmSpecList", "documentation":"

The signing algorithms that the KMS key supports. You cannot use the KMS key with other signing algorithms within KMS.

This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY.

" }, + "KeyAgreementAlgorithms":{ + "shape":"KeyAgreementAlgorithmSpecList", + "documentation":"

The key agreement algorithm used to derive a shared secret.

" + }, "MultiRegion":{ "shape":"NullableBooleanType", "documentation":"

Indicates whether the KMS key is a multi-Region (True) or regional (False) key. This value is True for multi-Region primary and replica keys and False for regional KMS keys.

For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

" @@ -2737,7 +2837,8 @@ "enum":[ "SIGN_VERIFY", "ENCRYPT_DECRYPT", - "GENERATE_VERIFY_MAC" + "GENERATE_VERIFY_MAC", + "KEY_AGREEMENT" ] }, "LimitExceededException":{ @@ -3788,7 +3889,8 @@ "enum":[ "RSA_2048", "RSA_3072", - "RSA_4096" + "RSA_4096", + "SM2" ] }, "XksKeyAlreadyInUseException":{ diff --git a/botocore/data/lakeformation/2017-03-31/service-2.json b/botocore/data/lakeformation/2017-03-31/service-2.json index ac16da0185..3e6a768ab4 100644 --- a/botocore/data/lakeformation/2017-03-31/service-2.json +++ b/botocore/data/lakeformation/2017-03-31/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"lakeformation", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Lake Formation", "serviceId":"LakeFormation", "signatureVersion":"v4", @@ -376,6 +377,21 @@ ], "documentation":"

Returns a data cells filter.

" }, + "GetDataLakePrincipal":{ + "name":"GetDataLakePrincipal", + "http":{ + "method":"POST", + "requestUri":"/GetDataLakePrincipal" + }, + "input":{"shape":"GetDataLakePrincipalRequest"}, + "output":{"shape":"GetDataLakePrincipalResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns the identity of the invoking principal.

" + }, "GetDataLakeSettings":{ "name":"GetDataLakeSettings", "http":{ @@ -2017,6 +2033,20 @@ } } }, + "GetDataLakePrincipalRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDataLakePrincipalResponse":{ + "type":"structure", + "members":{ + "Identity":{ + "shape":"IdentityString", + "documentation":"

A unique identifier of the invoking principal.

" + } + } + }, "GetDataLakeSettingsRequest":{ "type":"structure", "members":{ @@ -2518,6 +2548,7 @@ "min":1 }, "IdentityCenterInstanceArn":{"type":"string"}, + "IdentityString":{"type":"string"}, "Integer":{ "type":"integer", "box":true diff --git a/botocore/data/launch-wizard/2018-05-10/service-2.json b/botocore/data/launch-wizard/2018-05-10/service-2.json index 956316c387..c4365bb5e2 100644 --- a/botocore/data/launch-wizard/2018-05-10/service-2.json +++ b/botocore/data/launch-wizard/2018-05-10/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"launchwizard", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Launch Wizard", "serviceId":"Launch Wizard", "signatureVersion":"v4", @@ -39,6 +40,7 @@ "input":{"shape":"DeleteDeploymentInput"}, "output":{"shape":"DeleteDeploymentOutput"}, "errors":[ + {"shape":"ResourceLimitException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -78,6 +80,22 @@ ], "documentation":"

Returns information about a workload.

" }, + "GetWorkloadDeploymentPattern":{ + "name":"GetWorkloadDeploymentPattern", + "http":{ + "method":"POST", + "requestUri":"/getWorkloadDeploymentPattern", + "responseCode":200 + }, + "input":{"shape":"GetWorkloadDeploymentPatternInput"}, + "output":{"shape":"GetWorkloadDeploymentPatternOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns details for a given workload and deployment pattern, including the available specifications. You can use the ListWorkloads operation to discover the available workload names and the ListWorkloadDeploymentPatterns operation to discover the available deployment pattern names of a given workload.

" + }, "ListDeploymentEvents":{ "name":"ListDeploymentEvents", "http":{ @@ -109,6 +127,22 @@ ], "documentation":"

Lists the deployments that have been created.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the tags associated with a specified resource.

" + }, "ListWorkloadDeploymentPatterns":{ "name":"ListWorkloadDeploymentPatterns", "http":{ @@ -123,7 +157,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the workload deployment patterns.

" + "documentation":"

Lists the workload deployment patterns for a given workload name. You can use the ListWorkloads operation to discover the available workload names.

" }, "ListWorkloads":{ "name":"ListWorkloads", @@ -138,10 +172,47 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists the workloads.

" + "documentation":"

Lists the available workload names. You can use the ListWorkloadDeploymentPatterns operation to discover the available deployment patterns for a given workload.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds the specified tags to the given resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the specified tags from the given resource.

", + "idempotent":true } }, "shapes":{ + "AllowedValues":{ + "type":"list", + "member":{"shape":"ValueString"} + }, "Boolean":{ "type":"boolean", "box":true @@ -169,11 +240,15 @@ }, "specifications":{ "shape":"DeploymentSpecifications", - "documentation":"

The settings specified for the deployment. For more information on the specifications required for creating a deployment, see Workload specifications.

" + "documentation":"

The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags to add to the deployment.

" }, "workloadName":{ "shape":"WorkloadName", - "documentation":"

The name of the workload. You can use the ListWorkloadDeploymentPatterns operation to discover supported values for this parameter.

" + "documentation":"

The name of the workload. You can use the ListWorkloads operation to discover supported values for this parameter.

" } } }, @@ -209,6 +284,24 @@ } } }, + "DeploymentConditionalField":{ + "type":"structure", + "members":{ + "comparator":{ + "shape":"String", + "documentation":"

The comparator of the condition.

Valid values: Equal | NotEqual

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the deployment condition.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value of the condition.

" + } + }, + "documentation":"

A field that details a condition of the specifications for a deployment.

" + }, "DeploymentData":{ "type":"structure", "members":{ @@ -220,6 +313,10 @@ "shape":"Timestamp", "documentation":"

The time the deployment was deleted.

" }, + "deploymentArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the deployment.

" + }, "id":{ "shape":"DeploymentId", "documentation":"

The ID of the deployment.

" @@ -238,12 +335,16 @@ }, "specifications":{ "shape":"DeploymentSpecifications", - "documentation":"

The specifications of the deployment. For more information on specifications for each deployment, see Workload specifications.

" + "documentation":"

The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation.

" }, "status":{ "shape":"DeploymentStatus", "documentation":"

The status of the deployment.

" }, + "tags":{ + "shape":"Tags", + "documentation":"

Information about the tags attached to a deployment.

" + }, "workloadName":{ "shape":"WorkloadName", "documentation":"

The name of the workload.

" @@ -358,15 +459,15 @@ }, "DeploymentName":{ "type":"string", - "max":25, + "max":50, "min":1, - "pattern":"^[A-Za-z0-9_\\s\\.-]+$" + "pattern":"^[A-Za-z0-9_\\.-]+$" }, "DeploymentPatternName":{ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9-]+$" + "pattern":"^[A-Za-z0-9][a-zA-Z0-9-]*$" }, "DeploymentSpecifications":{ "type":"map", @@ -376,6 +477,38 @@ "min":1, "sensitive":true }, + "DeploymentSpecificationsData":{ + "type":"list", + "member":{"shape":"DeploymentSpecificationsField"}, + "max":100, + "min":1 + }, + "DeploymentSpecificationsField":{ + "type":"structure", + "members":{ + "allowedValues":{ + "shape":"AllowedValues", + "documentation":"

The allowed values of the deployment specification.

" + }, + "conditionals":{ + "shape":"SpecificationsConditionalData", + "documentation":"

The conditionals used for the deployment specification.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the deployment specification.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the deployment specification.

" + }, + "required":{ + "shape":"String", + "documentation":"

Indicates if the deployment specification is required.

" + } + }, + "documentation":"

A field that details a specification of a deployment pattern.

" + }, "DeploymentStatus":{ "type":"string", "enum":[ @@ -422,6 +555,32 @@ } } }, + "GetWorkloadDeploymentPatternInput":{ + "type":"structure", + "required":[ + "deploymentPatternName", + "workloadName" + ], + "members":{ + "deploymentPatternName":{ + "shape":"DeploymentPatternName", + "documentation":"

The name of the deployment pattern.

" + }, + "workloadName":{ + "shape":"WorkloadName", + "documentation":"

The name of the workload.

" + } + } + }, + "GetWorkloadDeploymentPatternOutput":{ + "type":"structure", + "members":{ + "workloadDeploymentPattern":{ + "shape":"WorkloadDeploymentPatternData", + "documentation":"

Details about the workload deployment pattern.

" + } + } + }, "GetWorkloadInput":{ "type":"structure", "required":["workloadName"], @@ -493,7 +652,7 @@ "members":{ "filters":{ "shape":"DeploymentFilterList", - "documentation":"

Filters to scope the results. The following filters are supported:

  • WORKLOAD_NAME

  • DEPLOYMENT_STATUS

" + "documentation":"

Filters to scope the results. The following filters are supported:

  • WORKLOAD_NAME - The name used in deployments.

  • DEPLOYMENT_STATUS - COMPLETED | CREATING | DELETE_IN_PROGRESS | DELETE_INITIATING | DELETE_FAILED | DELETED | FAILED | IN_PROGRESS | VALIDATING

" }, "maxResults":{ "shape":"MaxDeploymentResults", @@ -518,6 +677,27 @@ } } }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

Information about the tags.

" + } + } + }, "ListWorkloadDeploymentPatternsInput":{ "type":"structure", "required":["workloadName"], @@ -628,8 +808,88 @@ }, "exception":true }, + "SpecificationsConditionalData":{ + "type":"list", + "member":{"shape":"DeploymentConditionalField"}, + "max":5, + "min":1 + }, "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

One or more tags to attach to the resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, "Timestamp":{"type":"timestamp"}, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

Keys identifying the tags to remove.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, "ValidationException":{ "type":"structure", "members":{ @@ -699,6 +959,44 @@ "type":"list", "member":{"shape":"WorkloadDataSummary"} }, + "WorkloadDeploymentPatternData":{ + "type":"structure", + "members":{ + "deploymentPatternName":{ + "shape":"DeploymentPatternName", + "documentation":"

The name of the deployment pattern.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the deployment pattern.

" + }, + "displayName":{ + "shape":"String", + "documentation":"

The display name of the deployment pattern.

" + }, + "specifications":{ + "shape":"DeploymentSpecificationsData", + "documentation":"

The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation.

" + }, + "status":{ + "shape":"WorkloadDeploymentPatternStatus", + "documentation":"

The status of the deployment pattern.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the deployment pattern.

" + }, + "workloadName":{ + "shape":"WorkloadName", + "documentation":"

The workload name of the deployment pattern.

" + }, + "workloadVersionName":{ + "shape":"WorkloadVersionName", + "documentation":"

The workload version name of the deployment pattern.

" + } + }, + "documentation":"

The data that details a workload deployment pattern.

" + }, "WorkloadDeploymentPatternDataSummary":{ "type":"structure", "members":{ @@ -748,9 +1046,9 @@ }, "WorkloadName":{ "type":"string", - "max":256, + "max":100, "min":1, - "pattern":"^[a-zA-Z0-9-]+$" + "pattern":"^[A-Za-z][a-zA-Z0-9-_]*$" }, "WorkloadStatus":{ "type":"string", @@ -765,7 +1063,7 @@ "type":"string", "max":30, "min":5, - "pattern":"^[a-zA-Z0-9-]+$" + "pattern":"^[A-Za-z0-9][a-zA-Z0-9-]*$" } }, "documentation":"

Launch Wizard offers a guided way of sizing, configuring, and deploying Amazon Web Services resources for third party applications, such as Microsoft SQL Server Always On and HANA based SAP systems, without the need to manually identify and provision individual Amazon Web Services resources.

" diff --git a/botocore/data/lexv2-models/2020-08-07/service-2.json b/botocore/data/lexv2-models/2020-08-07/service-2.json index 6671405782..90e0a1c303 100644 --- a/botocore/data/lexv2-models/2020-08-07/service-2.json +++ b/botocore/data/lexv2-models/2020-08-07/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"models-v2-lex", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Lex Models V2", "serviceFullName":"Amazon Lex Model Building V2", "serviceId":"Lex Models V2", "signatureVersion":"v4", "signingName":"lex", - "uid":"models.lex.v2-2020-08-07" + "uid":"models.lex.v2-2020-08-07", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchCreateCustomVocabularyItem":{ @@ -256,7 +258,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created.

You can't create a resource policy statement that allows cross-account access.

" + "documentation":"

Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created.

You can't create a resource policy statement that allows cross-account access.

You need to add the CreateResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API.

" }, "CreateSlot":{ "name":"CreateSlot", @@ -533,7 +535,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception.

" + "documentation":"

Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an exception.

You need to add the DeleteResourcePolicy or UpdateResourcePolicy action to the bot role in order to call the API.

" }, "DeleteSlot":{ "name":"DeleteSlot", @@ -3295,6 +3297,34 @@ } } }, + "BedrockGuardrailConfiguration":{ + "type":"structure", + "required":[ + "identifier", + "version" + ], + "members":{ + "identifier":{ + "shape":"BedrockGuardrailIdentifier", + "documentation":"

The unique guardrail id for the Bedrock guardrail configuration.

" + }, + "version":{ + "shape":"BedrockGuardrailVersion", + "documentation":"

The guardrail version for the Bedrock guardrail configuration.

" + } + }, + "documentation":"

The details on the Bedrock guardrail configuration.

" + }, + "BedrockGuardrailIdentifier":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + }, + "BedrockGuardrailVersion":{ + "type":"string", + "pattern":"^(([1-9][0-9]{0,7})|(DRAFT))$" + }, "BedrockKnowledgeBaseArn":{ "type":"string", "max":200, @@ -3307,15 +3337,38 @@ "members":{ "bedrockKnowledgeBaseArn":{ "shape":"BedrockKnowledgeBaseArn", - "documentation":"

The ARN of the knowledge base used.

" + "documentation":"

The base ARN of the knowledge base used.

" + }, + "exactResponse":{ + "shape":"Boolean", + "documentation":"

Specifies whether to return an exact response, or to return an answer generated by the model, using the fields you specify from the database.

" + }, + "exactResponseFields":{ + "shape":"BedrockKnowledgeStoreExactResponseFields", + "documentation":"

Contains the names of the fields used for an exact response to the user.

" } }, "documentation":"

Contains details about the configuration of an Amazon Bedrock knowledge base.

" }, + "BedrockKnowledgeStoreExactResponseFields":{ + "type":"structure", + "members":{ + "answerField":{ + "shape":"AnswerField", + "documentation":"

The answer field used for an exact response from Bedrock Knowledge Store.

" + } + }, + "documentation":"

The exact response fields given by the Bedrock knowledge store.

" + }, "BedrockModelArn":{ "type":"string", "pattern":"^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model\\/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}$" }, + "BedrockModelCustomPrompt":{ + "type":"string", + "max":4000, + "min":1 + }, "BedrockModelSpecification":{ "type":"structure", "required":["modelArn"], @@ -3323,10 +3376,29 @@ "modelArn":{ "shape":"BedrockModelArn", "documentation":"

The ARN of the foundation model used in descriptive bot building.

" + }, + "guardrail":{ + "shape":"BedrockGuardrailConfiguration", + "documentation":"

The guardrail configuration in the Bedrock model specification details.

" + }, + "traceStatus":{ + "shape":"BedrockTraceStatus", + "documentation":"

The Bedrock trace status in the Bedrock model specification details.

" + }, + "customPrompt":{ + "shape":"BedrockModelCustomPrompt", + "documentation":"

The custom prompt used in the Bedrock model specification details.

" } }, "documentation":"

Contains information about the Amazon Bedrock model used to interpret the prompt used in descriptive bot building.

" }, + "BedrockTraceStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Boolean":{"type":"boolean"}, "BotAliasHistoryEvent":{ "type":"structure", diff --git a/botocore/data/license-manager-linux-subscriptions/2018-05-10/endpoint-rule-set-1.json b/botocore/data/license-manager-linux-subscriptions/2018-05-10/endpoint-rule-set-1.json index df67e99d28..d624f6615f 100644 --- a/botocore/data/license-manager-linux-subscriptions/2018-05-10/endpoint-rule-set-1.json +++ b/botocore/data/license-manager-linux-subscriptions/2018-05-10/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], 
"error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff --git a/botocore/data/license-manager-linux-subscriptions/2018-05-10/paginators-1.json b/botocore/data/license-manager-linux-subscriptions/2018-05-10/paginators-1.json index 1dc8f4d827..6d74d22e33 100644 --- a/botocore/data/license-manager-linux-subscriptions/2018-05-10/paginators-1.json +++ b/botocore/data/license-manager-linux-subscriptions/2018-05-10/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Subscriptions" + }, + "ListRegisteredSubscriptionProviders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RegisteredSubscriptionProviders" } } } diff --git a/botocore/data/license-manager-linux-subscriptions/2018-05-10/service-2.json b/botocore/data/license-manager-linux-subscriptions/2018-05-10/service-2.json index 73b857ab06..e3d923c11d 100644 --- a/botocore/data/license-manager-linux-subscriptions/2018-05-10/service-2.json +++ b/botocore/data/license-manager-linux-subscriptions/2018-05-10/service-2.json @@ -5,13 +5,51 @@ 
"endpointPrefix":"license-manager-linux-subscriptions", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS License Manager Linux Subscriptions", "serviceId":"License Manager Linux Subscriptions", "signatureVersion":"v4", "signingName":"license-manager-linux-subscriptions", - "uid":"license-manager-linux-subscriptions-2018-05-10" + "uid":"license-manager-linux-subscriptions-2018-05-10", + "auth":["aws.auth#sigv4"] }, "operations":{ + "DeregisterSubscriptionProvider":{ + "name":"DeregisterSubscriptionProvider", + "http":{ + "method":"POST", + "requestUri":"/subscription/DeregisterSubscriptionProvider", + "responseCode":200 + }, + "input":{"shape":"DeregisterSubscriptionProviderRequest"}, + "output":{"shape":"DeregisterSubscriptionProviderResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Remove a third-party subscription provider from the Bring Your Own License (BYOL) subscriptions registered to your account.

", + "idempotent":true + }, + "GetRegisteredSubscriptionProvider":{ + "name":"GetRegisteredSubscriptionProvider", + "http":{ + "method":"POST", + "requestUri":"/subscription/GetRegisteredSubscriptionProvider", + "responseCode":200 + }, + "input":{"shape":"GetRegisteredSubscriptionProviderRequest"}, + "output":{"shape":"GetRegisteredSubscriptionProviderResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Get details for a Bring Your Own License (BYOL) subscription that's registered to your account.

", + "idempotent":true + }, "GetServiceSettings":{ "name":"GetServiceSettings", "http":{ @@ -26,7 +64,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists the Linux subscriptions service settings.

", + "documentation":"

Lists the Linux subscriptions service settings for your account.

", "idempotent":true }, "ListLinuxSubscriptionInstances":{ @@ -63,6 +101,89 @@ "documentation":"

Lists the Linux subscriptions that have been discovered. If you have linked your organization, the returned results will include data aggregated across your accounts in Organizations.

", "idempotent":true }, + "ListRegisteredSubscriptionProviders":{ + "name":"ListRegisteredSubscriptionProviders", + "http":{ + "method":"POST", + "requestUri":"/subscription/ListRegisteredSubscriptionProviders", + "responseCode":200 + }, + "input":{"shape":"ListRegisteredSubscriptionProvidersRequest"}, + "output":{"shape":"ListRegisteredSubscriptionProvidersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

List Bring Your Own License (BYOL) subscription registration resources for your account.

", + "idempotent":true + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

List the metadata tags that are assigned to the specified Amazon Web Services resource.

" + }, + "RegisterSubscriptionProvider":{ + "name":"RegisterSubscriptionProvider", + "http":{ + "method":"POST", + "requestUri":"/subscription/RegisterSubscriptionProvider", + "responseCode":200 + }, + "input":{"shape":"RegisterSubscriptionProviderRequest"}, + "output":{"shape":"RegisterSubscriptionProviderResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Register the supported third-party subscription provider for your Bring Your Own License (BYOL) subscription.

", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"PUT", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Add metadata tags to the specified Amazon Web Services resource.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Remove one or more metadata tags from the specified Amazon Web Services resource.

", + "idempotent":true + }, "UpdateServiceSettings":{ "name":"UpdateServiceSettings", "http":{ @@ -94,6 +215,21 @@ "type":"long", "box":true }, + "DeregisterSubscriptionProviderRequest":{ + "type":"structure", + "required":["SubscriptionProviderArn"], + "members":{ + "SubscriptionProviderArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the subscription provider resource to deregister.

" + } + } + }, + "DeregisterSubscriptionProviderResponse":{ + "type":"structure", + "members":{ + } + }, "Filter":{ "type":"structure", "members":{ @@ -116,6 +252,45 @@ "type":"list", "member":{"shape":"Filter"} }, + "GetRegisteredSubscriptionProviderRequest":{ + "type":"structure", + "required":["SubscriptionProviderArn"], + "members":{ + "SubscriptionProviderArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the BYOL registration resource to get details for.

" + } + } + }, + "GetRegisteredSubscriptionProviderResponse":{ + "type":"structure", + "members":{ + "LastSuccessfulDataRetrievalTime":{ + "shape":"String", + "documentation":"

The timestamp from the last time License Manager retrieved subscription details from your registered third-party Linux subscription provider.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of the third-party access secret stored in Secrets Manager for the BYOL registration resource specified in the request.

" + }, + "SubscriptionProviderArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) for the BYOL registration resource specified in the request.

" + }, + "SubscriptionProviderSource":{ + "shape":"SubscriptionProviderSource", + "documentation":"

The subscription provider for the BYOL registration resource specified in the request.

" + }, + "SubscriptionProviderStatus":{ + "shape":"SubscriptionProviderStatus", + "documentation":"

The status of the Linux subscription provider access token from the last successful subscription data request.

" + }, + "SubscriptionProviderStatusMessage":{ + "shape":"String", + "documentation":"

The detailed message from your subscription provider token status.

" + } + } + }, "GetServiceSettingsRequest":{ "type":"structure", "members":{ @@ -157,6 +332,10 @@ "shape":"String", "documentation":"

The AMI ID used to launch the instance.

" }, + "DualSubscription":{ + "shape":"String", + "documentation":"

Indicates that you have two different license subscriptions for the same software on your instance.

" + }, "InstanceID":{ "shape":"String", "documentation":"

The instance ID of the resource.

" @@ -169,6 +348,10 @@ "shape":"String", "documentation":"

The time in which the last discovery updated the instance details.

" }, + "OsVersion":{ + "shape":"String", + "documentation":"

The operating system software version that runs on your instance.

" + }, "ProductCode":{ "shape":"ProductCodeList", "documentation":"

The product code for the instance. For more information, see Usage operation values in the License Manager User Guide .

" @@ -177,13 +360,25 @@ "shape":"String", "documentation":"

The Region the instance is running in.

" }, + "RegisteredWithSubscriptionProvider":{ + "shape":"String", + "documentation":"

Indicates that your instance uses a BYOL license subscription from a third-party Linux subscription provider that you've registered with License Manager.

" + }, "Status":{ "shape":"String", "documentation":"

The status of the instance.

" }, "SubscriptionName":{ "shape":"String", - "documentation":"

The name of the subscription being used by the instance.

" + "documentation":"

The name of the license subscription that the instance uses.

" + }, + "SubscriptionProviderCreateTime":{ + "shape":"String", + "documentation":"

The timestamp when you registered the third-party Linux subscription provider for the subscription that the instance uses.

" + }, + "SubscriptionProviderUpdateTime":{ + "shape":"String", + "documentation":"

The timestamp from the last time that the instance synced with the registered third-party Linux subscription provider.

" }, "UsageOperation":{ "shape":"String", @@ -235,15 +430,15 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

An array of structures that you can use to filter the results to those that match one or more sets of key-value pairs that you specify. For example, you can filter by the name of AmiID with an optional operator to see subscriptions that match, partially match, or don't match a certain Amazon Machine Image (AMI) ID.

The valid names for this filter are:

  • AmiID

  • InstanceID

  • AccountID

  • Status

  • Region

  • UsageOperation

  • ProductCode

  • InstanceType

The valid Operators for this filter are:

  • contains

  • equals

  • Notequal

" + "documentation":"

An array of structures that you can use to filter the results by your specified criteria. For example, you can specify Region in the Name, with the contains operator to list all subscriptions that match a partial string in the Value, such as us-west.

For each filter, you can specify one of the following values for the Name key to streamline results:

  • AccountID

  • AmiID

  • DualSubscription

  • InstanceID

  • InstanceType

  • ProductCode

  • Region

  • Status

  • UsageOperation

For each filter, you can use one of the following Operator values to define the behavior of the filter:

  • contains

  • equals

  • Notequal

" }, "MaxResults":{ "shape":"BoxInteger", - "documentation":"

Maximum number of results to return in a single call.

" + "documentation":"

The maximum items to return in a request.

" }, "NextToken":{ "shape":"ListLinuxSubscriptionInstancesRequestNextTokenString", - "documentation":"

Token for the next set of results.

" + "documentation":"

A token to specify where to start paginating. This is the nextToken from a previously truncated response.

" } }, "documentation":"

The NextToken length limit is half of the DynamoDB accepted limit. Increase this limit if the number of parameters in the request increases.

" @@ -262,7 +457,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Token for the next set of results.

" + "documentation":"

The next token used for paginated responses. When this field isn't empty, there are additional elements that the service hasn't included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -275,11 +470,11 @@ }, "MaxResults":{ "shape":"BoxInteger", - "documentation":"

Maximum number of results to return in a single call.

" + "documentation":"

The maximum items to return in a request.

" }, "NextToken":{ "shape":"ListLinuxSubscriptionsRequestNextTokenString", - "documentation":"

Token for the next set of results.

" + "documentation":"

A token to specify where to start paginating. This is the nextToken from a previously truncated response.

" } }, "documentation":"

NextToken length limit is half of the DDB accepted limit. Increase this limit if the number of parameters in a request increases.

" @@ -294,7 +489,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

Token for the next set of results.

" + "documentation":"

The next token used for paginated responses. When this field isn't empty, there are additional elements that the service hasn't included in this request. Use this token with the next request to retrieve additional objects.

" }, "Subscriptions":{ "shape":"SubscriptionList", @@ -302,6 +497,63 @@ } } }, + "ListRegisteredSubscriptionProvidersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListRegisteredSubscriptionProvidersRequestMaxResultsInteger", + "documentation":"

The maximum items to return in a request.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

A token to specify where to start paginating. This is the nextToken from a previously truncated response.

" + }, + "SubscriptionProviderSources":{ + "shape":"SubscriptionProviderSourceList", + "documentation":"

To filter your results, specify which subscription providers to return in the list.

" + } + } + }, + "ListRegisteredSubscriptionProvidersRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRegisteredSubscriptionProvidersResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

The next token used for paginated responses. When this field isn't empty, there are additional elements that the service hasn't included in this request. Use this token with the next request to retrieve additional objects.

" + }, + "RegisteredSubscriptionProviders":{ + "shape":"RegisteredSubscriptionProviderList", + "documentation":"

The list of BYOL registration resources that fit the criteria you specified in the request.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which to list metadata tags.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

The metadata tags for the requested resource.

" + } + } + }, "Operator":{ "type":"string", "enum":[ @@ -323,6 +575,90 @@ "type":"list", "member":{"shape":"String"} }, + "RegisterSubscriptionProviderRequest":{ + "type":"structure", + "required":[ + "SecretArn", + "SubscriptionProviderSource" + ], + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of the secret where you've stored your subscription provider's access token. For RHEL subscriptions managed through the Red Hat Subscription Manager (RHSM), the secret contains your Red Hat Offline token.

" + }, + "SubscriptionProviderSource":{ + "shape":"SubscriptionProviderSource", + "documentation":"

The supported Linux subscription provider to register.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The metadata tags to assign to your registered Linux subscription provider resource.

" + } + } + }, + "RegisterSubscriptionProviderResponse":{ + "type":"structure", + "members":{ + "SubscriptionProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered.

" + }, + "SubscriptionProviderSource":{ + "shape":"SubscriptionProviderSource", + "documentation":"

The Linux subscription provider that you registered.

" + }, + "SubscriptionProviderStatus":{ + "shape":"SubscriptionProviderStatus", + "documentation":"

Indicates the status of the registration action for the Linux subscription provider that you requested.

" + } + } + }, + "RegisteredSubscriptionProvider":{ + "type":"structure", + "members":{ + "LastSuccessfulDataRetrievalTime":{ + "shape":"String", + "documentation":"

The timestamp from the last time that License Manager accessed third-party subscription data for your account from your registered Linux subscription provider.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of the Secrets Manager secret that stores your registered Linux subscription provider access token. For RHEL account subscriptions, this is the offline token.

" + }, + "SubscriptionProviderArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered.

" + }, + "SubscriptionProviderSource":{ + "shape":"SubscriptionProviderSource", + "documentation":"

A supported third-party Linux subscription provider. License Manager currently supports Red Hat subscriptions.

" + }, + "SubscriptionProviderStatus":{ + "shape":"SubscriptionProviderStatus", + "documentation":"

Indicates the status of your registered Linux subscription provider access token from the last time License Manager retrieved subscription data. For RHEL account subscriptions, this is the status of the offline token.

" + }, + "SubscriptionProviderStatusMessage":{ + "shape":"String", + "documentation":"

A detailed message that's associated with your BYOL subscription provider token status.

" + } + }, + "documentation":"

A third-party provider for operating system (OS) platform software and license subscriptions, such as Red Hat. When you register a third-party Linux subscription provider, License Manager can get subscription data from the registered provider.

" + }, + "RegisteredSubscriptionProviderList":{ + "type":"list", + "member":{"shape":"RegisteredSubscriptionProvider"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Unable to find the requested Amazon Web Services resource.

", + "exception":true + }, + "SecretArn":{ + "type":"string", + "pattern":"^arn:[a-z0-9-\\.]{1,63}:secretsmanager:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:secret:[^/]{1,1023}$" + }, "Status":{ "type":"string", "enum":[ @@ -371,6 +707,65 @@ "type":"list", "member":{"shape":"Subscription"} }, + "SubscriptionProviderArn":{ + "type":"string", + "pattern":"^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$" + }, + "SubscriptionProviderSource":{ + "type":"string", + "enum":["RedHat"] + }, + "SubscriptionProviderSourceList":{ + "type":"list", + "member":{"shape":"SubscriptionProviderSource"} + }, + "SubscriptionProviderStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INVALID", + "PENDING" + ] + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String"}, + "max":50, + "min":0, + "sensitive":true + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services resource to which to add the specified metadata tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The metadata tags to assign to the Amazon Web Services resource. Tags are formatted as key value pairs.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "Tags":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"}, + "max":50, + "min":0, + "sensitive":true + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -379,6 +774,32 @@ "documentation":"

The request was denied due to request throttling.

", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"SubscriptionProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services resource to remove the metadata tags from.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of metadata tag keys to remove from the requested resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateServiceSettingsRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index cb178ff952..665fb211e0 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"lightsail", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Lightsail", "serviceId":"Lightsail", "signatureVersion":"v4", "targetPrefix":"Lightsail_20161128", - "uid":"lightsail-2016-11-28" + "uid":"lightsail-2016-11-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AllocateStaticIp":{ @@ -3558,7 +3560,7 @@ "members":{ "blueprintId":{ "shape":"NonEmptyString", - "documentation":"

The ID for the virtual private server image (app_wordpress_4_4 or app_lamp_7_0).

" + "documentation":"

The ID for the virtual private server image (app_wordpress_x_x or app_lamp_x_x).

" }, "name":{ "shape":"ResourceName", @@ -3799,11 +3801,11 @@ }, "bundleId":{ "shape":"NonEmptyString", - "documentation":"

The bundle ID (micro_1_0).

" + "documentation":"

The bundle ID (micro_x_x).

" }, "instanceType":{ "shape":"string", - "documentation":"

The Amazon EC2 instance type (t2.micro).

" + "documentation":"

The instance type (micro).

" }, "isActive":{ "shape":"boolean", @@ -5317,7 +5319,7 @@ }, "bundleId":{ "shape":"NonEmptyString", - "documentation":"

The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0).

" + "documentation":"

The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_x_x).

" }, "userData":{ "shape":"string", @@ -5337,7 +5339,7 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type for the instance.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" + "documentation":"

The IP address type for the instance.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" }, "sourceInstanceName":{ "shape":"string", @@ -5386,11 +5388,11 @@ }, "blueprintId":{ "shape":"NonEmptyString", - "documentation":"

The ID for a virtual private server image (app_wordpress_4_4 or app_lamp_7_0). Use the get blueprints operation to return a list of available images (or blueprints).

Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.

" + "documentation":"

The ID for a virtual private server image (app_wordpress_x_x or app_lamp_x_x). Use the get blueprints operation to return a list of available images (or blueprints).

Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.

" }, "bundleId":{ "shape":"NonEmptyString", - "documentation":"

The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0).

" + "documentation":"

The bundle of specification information for your virtual private server (or instance), including the pricing plan (medium_x_x).

" }, "userData":{ "shape":"string", @@ -5410,7 +5412,7 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type for the instance.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" + "documentation":"

The IP address type for the instance.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" } } }, @@ -5495,7 +5497,7 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type for the load balancer.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" + "documentation":"

The IP address type for the load balancer.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

The default value is dualstack.

" }, "tlsPolicyName":{ "shape":"string", @@ -8761,7 +8763,7 @@ "members":{ "name":{ "shape":"ResourceName", - "documentation":"

The name the user gave the instance (Amazon_Linux-1GB-Ohio-1).

" + "documentation":"

The name the user gave the instance (Amazon_Linux_2023-1).

" }, "arn":{ "shape":"NonEmptyString", @@ -8789,15 +8791,15 @@ }, "blueprintId":{ "shape":"NonEmptyString", - "documentation":"

The blueprint ID (os_amlinux_2016_03).

" + "documentation":"

The blueprint ID (amazon_linux_2023).

" }, "blueprintName":{ "shape":"NonEmptyString", - "documentation":"

The friendly name of the blueprint (Amazon Linux).

" + "documentation":"

The friendly name of the blueprint (Amazon Linux 2023).

" }, "bundleId":{ "shape":"NonEmptyString", - "documentation":"

The bundle for the instance (micro_1_0).

" + "documentation":"

The bundle for the instance (micro_x_x).

" }, "addOns":{ "shape":"AddOnList", @@ -8821,7 +8823,7 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type of the instance.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

" + "documentation":"

The IP address type of the instance.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

" }, "hardware":{ "shape":"InstanceHardware", @@ -9099,7 +9101,7 @@ }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

  • icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmpv6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" }, "accessFrom":{ "shape":"string", @@ -9149,7 +9151,7 @@ }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

  • icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmpv6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" }, "state":{ "shape":"PortState", @@ -9227,11 +9229,11 @@ }, "fromBlueprintId":{ "shape":"string", - "documentation":"

The blueprint ID from which you created the snapshot (os_debian_8_3). A blueprint is a virtual private server (or instance) image used to create instances quickly.

" + "documentation":"

The blueprint ID from which you created the snapshot (amazon_linux_2023). A blueprint is a virtual private server (or instance) image used to create instances quickly.

" }, "fromBundleId":{ "shape":"string", - "documentation":"

The bundle ID from which you created the snapshot (micro_1_0).

" + "documentation":"

The bundle ID from which you created the snapshot (micro_x_x).

" }, "isFromAutoSnapshot":{ "shape":"boolean", @@ -9249,11 +9251,11 @@ "members":{ "fromBundleId":{ "shape":"NonEmptyString", - "documentation":"

The bundle ID from which the source instance was created (micro_1_0).

" + "documentation":"

The bundle ID from which the source instance was created (micro_x_x).

" }, "fromBlueprintId":{ "shape":"NonEmptyString", - "documentation":"

The blueprint ID from which the source instance (os_debian_8_3).

" + "documentation":"

The blueprint ID from which the source instance was created (amazon_linux_2023).

" }, "fromDiskInfo":{ "shape":"DiskInfoList", @@ -9307,7 +9309,8 @@ "type":"string", "enum":[ "dualstack", - "ipv4" + "ipv4", + "ipv6" ] }, "Ipv6Address":{ @@ -9536,7 +9539,7 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type of the load balancer.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

" + "documentation":"

The IP address type of the load balancer.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

" }, "httpsRedirectionEnabled":{ "shape":"boolean", @@ -10097,7 +10100,8 @@ "tcp", "all", "udp", - "icmp" + "icmp", + "icmpv6" ] }, "NonEmptyString":{ @@ -10443,7 +10447,7 @@ }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

  • icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmpv6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" }, "cidrs":{ "shape":"StringList", @@ -11460,7 +11464,11 @@ }, "ipAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type to set for the specified resource.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

" + "documentation":"

The IP address type to set for the specified resource.

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.

" + }, + "acceptBundleUpdate":{ + "shape":"boolean", + "documentation":"

Required parameter to accept the instance bundle update when changing to, and from, IPv6-only.

An instance bundle will change when switching from dual-stack or ipv4, to ipv6. It also changes when switching from ipv6, to dual-stack or ipv4.

You must include this parameter in the command to update the bundle. For example, if you switch from dual-stack to ipv6, the bundle will be updated, and billing for the IPv6-only instance bundle begins immediately.

" } } }, diff --git a/botocore/data/location/2020-11-19/paginators-1.json b/botocore/data/location/2020-11-19/paginators-1.json index eaa2797564..55e77c9359 100644 --- a/botocore/data/location/2020-11-19/paginators-1.json +++ b/botocore/data/location/2020-11-19/paginators-1.json @@ -59,6 +59,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Entries" + }, + "ForecastGeofenceEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ForecastedEvents" } } } diff --git a/botocore/data/location/2020-11-19/paginators-1.sdk-extras.json b/botocore/data/location/2020-11-19/paginators-1.sdk-extras.json new file mode 100644 index 0000000000..2aba0bfb65 --- /dev/null +++ b/botocore/data/location/2020-11-19/paginators-1.sdk-extras.json @@ -0,0 +1,13 @@ +{ + "version": 1.0, + "merge": { + "pagination": { + "ForecastGeofenceEvents": { + "non_aggregate_keys": [ + "DistanceUnit", + "SpeedUnit" + ] + } + } + } +} diff --git a/botocore/data/location/2020-11-19/service-2.json b/botocore/data/location/2020-11-19/service-2.json index d9ac4344fe..c9deaf2ba2 100644 --- a/botocore/data/location/2020-11-19/service-2.json +++ b/botocore/data/location/2020-11-19/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2020-11-19", "endpointPrefix":"geo", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Location Service", "serviceId":"Location", "signatureVersion":"v4", @@ -564,6 +564,25 @@ "documentation":"

Removes the association between a tracker resource and a geofence collection.

Once you unlink a tracker resource from a geofence collection, the tracker positions will no longer be automatically evaluated against geofences.

", "endpoint":{"hostPrefix":"cp.tracking."} }, + "ForecastGeofenceEvents":{ + "name":"ForecastGeofenceEvents", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections/{CollectionName}/forecast-geofence-events", + "responseCode":200 + }, + "input":{"shape":"ForecastGeofenceEventsRequest"}, + "output":{"shape":"ForecastGeofenceEventsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Evaluates device positions against geofence geometries from a given geofence collection. The event forecasts three states for which a device can be in relative to a geofence:

ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window.

EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window.

IDLE: If a device is inside of a geofence, and the device is not moving.

", + "endpoint":{"hostPrefix":"geofencing."} + }, "GetDevicePosition":{ "name":"GetDevicePosition", "http":{ @@ -618,7 +637,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the geofence details from a geofence collection.

", + "documentation":"

Retrieves the geofence details from a geofence collection.

The returned geometry will always match the geometry format used when the geofence was created.

", "endpoint":{"hostPrefix":"geofencing."} }, "GetMapGlyphs":{ @@ -1134,6 +1153,25 @@ "documentation":"

Updates the specified properties of a given tracker resource.

", "endpoint":{"hostPrefix":"cp.tracking."}, "idempotent":true + }, + "VerifyDevicePosition":{ + "name":"VerifyDevicePosition", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/positions/verify", + "responseCode":200 + }, + "input":{"shape":"VerifyDevicePositionRequest"}, + "output":{"shape":"VerifyDevicePositionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Verifies the integrity of the device's position by determining if it was reported behind a proxy, and by comparing it to an inferred position estimated based on the device's state.

", + "endpoint":{"hostPrefix":"tracking."} } }, "shapes":{ @@ -1163,7 +1201,7 @@ "type":"string", "max":200, "min":5, - "pattern":"^geo:\\w*\\*?$" + "pattern":"(geo|geo-routes|geo-places|geo-maps):\\w*\\*?" }, "ApiKeyFilter":{ "type":"structure", @@ -1186,13 +1224,13 @@ "shape":"ApiKeyRestrictionsAllowActionsList", "documentation":"

A list of allowed actions that an API key resource grants permissions to perform. You must have at least one action for each type of resource. For example, if you have a place resource, you must include at least one place action.

The following are valid values for the actions.

  • Map actions

    • geo:GetMap* - Allows all actions needed for map rendering.

  • Place actions

    • geo:SearchPlaceIndexForText - Allows geocoding.

    • geo:SearchPlaceIndexForPosition - Allows reverse geocoding.

    • geo:SearchPlaceIndexForSuggestions - Allows generating suggestions from text.

    • GetPlace - Allows finding a place by place ID.

  • Route actions

    • geo:CalculateRoute - Allows point to point routing.

    • geo:CalculateRouteMatrix - Allows calculating a matrix of routes.

You must use these strings exactly. For example, to provide access to map rendering, the only valid action is geo:GetMap* as an input to the list. [\"geo:GetMap*\"] is valid but [\"geo:GetMapTile\"] is not. Similarly, you cannot use [\"geo:SearchPlaceIndexFor*\"] - you must list each of the Place actions separately.

" }, - "AllowReferers":{ - "shape":"ApiKeyRestrictionsAllowReferersList", - "documentation":"

An optional list of allowed HTTP referers for which requests must originate from. Requests using this API key from other domains will not be allowed.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9) or any symbols in this list $\\-._+!*`(),;/?:@=&

  • May contain a percent (%) if followed by 2 hexadecimal digits (A-F, a-f, 0-9); this is used for URL encoding purposes.

  • May contain wildcard characters question mark (?) and asterisk (*).

    Question mark (?) will replace any single character (including hexadecimal digits).

    Asterisk (*) will replace any multiple characters (including multiple hexadecimal digits).

  • No spaces allowed. For example, https://example.com.

" - }, "AllowResources":{ "shape":"ApiKeyRestrictionsAllowResourcesList", "documentation":"

A list of allowed resource ARNs that an API key bearer can perform actions on.

  • The ARN must be the correct ARN for a map, place, or route ARN. You may include wildcards in the resource-id to match multiple resources of the same type.

  • The resources must be in the same partition, region, and account-id as the key that is being created.

  • Other than wildcards, you must include the full ARN, including the arn, partition, service, region, account-id and resource-id delimited by colons (:).

  • No spaces allowed, even with wildcards. For example, arn:aws:geo:region:account-id:map/ExampleMap*.

For more information about ARN format, see Amazon Resource Names (ARNs).

" + }, + "AllowReferers":{ + "shape":"ApiKeyRestrictionsAllowReferersList", + "documentation":"

An optional list of allowed HTTP referers for which requests must originate from. Requests using this API key from other domains will not be allowed.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9) or any symbols in this list $\\-._+!*`(),;/?:@=&

  • May contain a percent (%) if followed by 2 hexadecimal digits (A-F, a-f, 0-9); this is used for URL encoding purposes.

  • May contain wildcard characters question mark (?) and asterisk (*).

    Question mark (?) will replace any single character (including hexadecimal digits).

    Asterisk (*) will replace any multiple characters (including multiple hexadecimal digits).

  • No spaces allowed. For example, https://example.com.

" } }, "documentation":"

API Restrictions on the allowed actions, resources, and referers for an API key resource.

" @@ -1211,7 +1249,7 @@ }, "ApiKeyRestrictionsAllowResourcesList":{ "type":"list", - "member":{"shape":"GeoArn"}, + "member":{"shape":"GeoArnV2"}, "max":5, "min":1 }, @@ -1219,7 +1257,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn(:[a-z0-9]+([.-][a-z0-9]+)*){2}(:([a-z0-9]+([.-][a-z0-9]+)*)?){2}:([^/].*)?$" + "pattern":"arn(:[a-z0-9]+([.-][a-z0-9]+)*){2}(:([a-z0-9]+([.-][a-z0-9]+)*)?){2}:([^/].*)?" }, "ArnList":{ "type":"list", @@ -1228,19 +1266,19 @@ "AssociateTrackerConsumerRequest":{ "type":"structure", "required":[ - "ConsumerArn", - "TrackerName" + "TrackerName", + "ConsumerArn" ], "members":{ - "ConsumerArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the geofence collection to be associated to tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer

" - }, "TrackerName":{ "shape":"ResourceName", "documentation":"

The name of the tracker resource to be associated with a geofence collection.

", "location":"uri", "locationName":"TrackerName" + }, + "ConsumerArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the geofence collection to be associated to tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer

" } } }, @@ -1249,6 +1287,12 @@ "members":{ } }, + "Base64EncodedGeobuf":{ + "type":"blob", + "max":600000, + "min":0, + "sensitive":true + }, "BatchDeleteDevicePositionHistoryError":{ "type":"structure", "required":[ @@ -1271,19 +1315,19 @@ "BatchDeleteDevicePositionHistoryRequest":{ "type":"structure", "required":[ - "DeviceIds", - "TrackerName" + "TrackerName", + "DeviceIds" ], "members":{ - "DeviceIds":{ - "shape":"BatchDeleteDevicePositionHistoryRequestDeviceIdsList", - "documentation":"

Devices whose position history you want to delete.

  • For example, for two devices: “DeviceIds” : [DeviceId1,DeviceId2]

" - }, "TrackerName":{ "shape":"ResourceName", "documentation":"

The name of the tracker resource to delete the device position history from.

", "location":"uri", "locationName":"TrackerName" + }, + "DeviceIds":{ + "shape":"BatchDeleteDevicePositionHistoryRequestDeviceIdsList", + "documentation":"

Devices whose position history you want to delete.

  • For example, for two devices: “DeviceIds” : [DeviceId1,DeviceId2]

" } } }, @@ -1306,17 +1350,17 @@ "BatchDeleteGeofenceError":{ "type":"structure", "required":[ - "Error", - "GeofenceId" + "GeofenceId", + "Error" ], "members":{ - "Error":{ - "shape":"BatchItemError", - "documentation":"

Contains details associated to the batch error.

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence associated with the error message.

" + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

Contains details associated to the batch error.

" } }, "documentation":"

Contains error details for each geofence that failed to delete from the geofence collection.

" @@ -1364,21 +1408,21 @@ "type":"structure", "required":[ "DeviceId", - "Error", - "SampleTime" + "SampleTime", + "Error" ], "members":{ "DeviceId":{ "shape":"Id", "documentation":"

The device associated with the position evaluation error.

" }, - "Error":{ - "shape":"BatchItemError", - "documentation":"

Contains details associated to the batch error.

" - }, "SampleTime":{ "shape":"Timestamp", "documentation":"

Specifies a timestamp for when the error occurred in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

Contains details associated to the batch error.

" } }, "documentation":"

Contains error details for each device that failed to evaluate its position against the geofences in a given geofence collection.

" @@ -1447,19 +1491,19 @@ "BatchGetDevicePositionRequest":{ "type":"structure", "required":[ - "DeviceIds", - "TrackerName" + "TrackerName", + "DeviceIds" ], "members":{ - "DeviceIds":{ - "shape":"BatchGetDevicePositionRequestDeviceIdsList", - "documentation":"

Devices whose position you want to retrieve.

  • For example, for two devices: device-ids=DeviceId1&device-ids=DeviceId2

" - }, "TrackerName":{ "shape":"BatchGetDevicePositionRequestTrackerNameString", "documentation":"

The tracker resource retrieving the device position.

", "location":"uri", "locationName":"TrackerName" + }, + "DeviceIds":{ + "shape":"BatchGetDevicePositionRequestDeviceIdsList", + "documentation":"

Devices whose position you want to retrieve.

  • For example, for two devices: device-ids=DeviceId1&device-ids=DeviceId2

" } } }, @@ -1472,22 +1516,22 @@ "BatchGetDevicePositionRequestTrackerNameString":{ "type":"string", "min":1, - "pattern":"^[-._\\w]+$" + "pattern":"[-._\\w]+" }, "BatchGetDevicePositionResponse":{ "type":"structure", "required":[ - "DevicePositions", - "Errors" + "Errors", + "DevicePositions" ], "members":{ - "DevicePositions":{ - "shape":"DevicePositionList", - "documentation":"

Contains device position details such as the device ID, position, and timestamps for when the position was received and sampled.

" - }, "Errors":{ "shape":"BatchGetDevicePositionErrorList", "documentation":"

Contains error details for each device that failed to send its position to the tracker resource.

" + }, + "DevicePositions":{ + "shape":"DevicePositionList", + "documentation":"

Contains device position details such as the device ID, position, and timestamps for when the position was received and sampled.

" } } }, @@ -1519,17 +1563,17 @@ "BatchPutGeofenceError":{ "type":"structure", "required":[ - "Error", - "GeofenceId" + "GeofenceId", + "Error" ], "members":{ - "Error":{ - "shape":"BatchItemError", - "documentation":"

Contains details associated to the batch error.

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence associated with the error message.

" + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

Contains details associated to the batch error.

" } }, "documentation":"

Contains error details for each geofence that failed to be stored in a given geofence collection.

" @@ -1574,13 +1618,13 @@ "shape":"Id", "documentation":"

The identifier for the geofence to be stored in a given geofence collection.

" }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

Contains the details to specify the position of the geofence. Can be a polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

The geofence polygon format supports a maximum of 1,000 vertices. The Geofence geobuf format supports a maximum of 100,000 vertices.

" + }, "GeofenceProperties":{ "shape":"PropertyMap", "documentation":"

Associates one or more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" - }, - "Geometry":{ - "shape":"GeofenceGeometry", - "documentation":"

Contains the details of the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

Each geofence polygon can have a maximum of 1,000 vertices.

" } }, "documentation":"

Contains geofence geometry details.

" @@ -1588,36 +1632,36 @@ "BatchPutGeofenceResponse":{ "type":"structure", "required":[ - "Errors", - "Successes" + "Successes", + "Errors" ], "members":{ - "Errors":{ - "shape":"BatchPutGeofenceErrorList", - "documentation":"

Contains additional error details for each geofence that failed to be stored in a geofence collection.

" - }, "Successes":{ "shape":"BatchPutGeofenceSuccessList", "documentation":"

Contains each geofence that was successfully stored in a geofence collection.

" + }, + "Errors":{ + "shape":"BatchPutGeofenceErrorList", + "documentation":"

Contains additional error details for each geofence that failed to be stored in a geofence collection.

" } } }, "BatchPutGeofenceSuccess":{ "type":"structure", "required":[ - "CreateTime", "GeofenceId", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence successfully stored in a geofence collection.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" @@ -1633,21 +1677,21 @@ "type":"structure", "required":[ "DeviceId", - "Error", - "SampleTime" + "SampleTime", + "Error" ], "members":{ "DeviceId":{ "shape":"Id", "documentation":"

The device associated with the failed location update.

" }, - "Error":{ - "shape":"BatchItemError", - "documentation":"

Contains details related to the error code such as the error code and error message.

" - }, "SampleTime":{ "shape":"Timestamp", "documentation":"

The timestamp at which the device position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

Contains details related to the error code such as the error code and error message.

" } }, "documentation":"

Contains error details for each device that failed to update its position.

" @@ -1731,43 +1775,43 @@ "location":"uri", "locationName":"CalculatorName" }, - "CarModeOptions":{ - "shape":"CalculateRouteCarModeOptions", - "documentation":"

Specifies route preferences when traveling by Car, such as avoiding routes that use ferries or tolls.

Requirements: TravelMode must be specified as Car.

" - }, - "DepartNow":{ - "shape":"Boolean", - "documentation":"

Sets the time of departure as the current time. Uses the current time to calculate the route matrix. You can't set both DepartureTime and DepartNow. If neither is set, the best time of day to travel with the best traffic conditions is used to calculate the route matrix.

Default Value: false

Valid Values: false | true

" - }, "DeparturePositions":{ "shape":"CalculateRouteMatrixRequestDeparturePositionsList", "documentation":"

The list of departure (origin) positions for the route matrix. An array of points, each of which is itself a 2-value array defined in WGS 84 format: [longitude, latitude]. For example, [-123.115, 49.285].

Depending on the data provider selected in the route calculator resource there may be additional restrictions on the inputs you can choose. See Position restrictions in the Amazon Location Service Developer Guide.

For route calculators that use Esri as the data provider, if you specify a departure that's not located on a road, Amazon Location moves the position to the nearest road. The snapped value is available in the result in SnappedDeparturePositions.

Valid Values: [-180 to 180,-90 to 90]

" }, + "DestinationPositions":{ + "shape":"CalculateRouteMatrixRequestDestinationPositionsList", + "documentation":"

The list of destination positions for the route matrix. An array of points, each of which is itself a 2-value array defined in WGS 84 format: [longitude, latitude]. For example, [-122.339, 47.615]

Depending on the data provider selected in the route calculator resource there may be additional restrictions on the inputs you can choose. See Position restrictions in the Amazon Location Service Developer Guide.

For route calculators that use Esri as the data provider, if you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road. The snapped value is available in the result in SnappedDestinationPositions.

Valid Values: [-180 to 180,-90 to 90]

" + }, + "TravelMode":{ + "shape":"TravelMode", + "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility.

The TravelMode you specify also determines how you specify route preferences:

  • If traveling by Car use the CarModeOptions parameter.

  • If traveling by Truck use the TruckModeOptions parameter.

Bicycle or Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more information about using Grab as a data provider, see GrabMaps in the Amazon Location Service Developer Guide.

Default Value: Car

" + }, "DepartureTime":{ "shape":"Timestamp", "documentation":"

Specifies the desired time of departure. Uses the given time to calculate the route matrix. You can't set both DepartureTime and DepartNow. If neither is set, the best time of day to travel with the best traffic conditions is used to calculate the route matrix.

Setting a departure time in the past returns a 400 ValidationException error.

  • In ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. For example, 2020-07-02T12:15:20.000Z+01:00

" }, - "DestinationPositions":{ - "shape":"CalculateRouteMatrixRequestDestinationPositionsList", - "documentation":"

The list of destination positions for the route matrix. An array of points, each of which is itself a 2-value array defined in WGS 84 format: [longitude, latitude]. For example, [-122.339, 47.615]

Depending on the data provider selected in the route calculator resource there may be additional restrictions on the inputs you can choose. See Position restrictions in the Amazon Location Service Developer Guide.

For route calculators that use Esri as the data provider, if you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road. The snapped value is available in the result in SnappedDestinationPositions.

Valid Values: [-180 to 180,-90 to 90]

" + "DepartNow":{ + "shape":"Boolean", + "documentation":"

Sets the time of departure as the current time. Uses the current time to calculate the route matrix. You can't set both DepartureTime and DepartNow. If neither is set, the best time of day to travel with the best traffic conditions is used to calculate the route matrix.

Default Value: false

Valid Values: false | true

" }, "DistanceUnit":{ "shape":"DistanceUnit", "documentation":"

Set the unit system to specify the distance.

Default Value: Kilometers

" }, + "CarModeOptions":{ + "shape":"CalculateRouteCarModeOptions", + "documentation":"

Specifies route preferences when traveling by Car, such as avoiding routes that use ferries or tolls.

Requirements: TravelMode must be specified as Car.

" + }, + "TruckModeOptions":{ + "shape":"CalculateRouteTruckModeOptions", + "documentation":"

Specifies route preferences when traveling by Truck, such as avoiding routes that use ferries or tolls, and truck specifications to consider when choosing an optimal road.

Requirements: TravelMode must be specified as Truck.

" + }, "Key":{ "shape":"ApiKey", "documentation":"

The optional API key to authorize the request.

", "location":"querystring", "locationName":"key" - }, - "TravelMode":{ - "shape":"TravelMode", - "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility.

The TravelMode you specify also determines how you specify route preferences:

  • If traveling by Car use the CarModeOptions parameter.

  • If traveling by Truck use the TruckModeOptions parameter.

Bicycle or Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more information about using Grab as a data provider, see GrabMaps in the Amazon Location Service Developer Guide.

Default Value: Car

" - }, - "TruckModeOptions":{ - "shape":"CalculateRouteTruckModeOptions", - "documentation":"

Specifies route preferences when traveling by Truck, such as avoiding routes that use ferries or tolls, and truck specifications to consider when choosing an optimal road.

Requirements: TravelMode must be specified as Truck.

" } } }, @@ -1825,26 +1869,26 @@ "type":"structure", "required":[ "DataSource", - "DistanceUnit", + "RouteCount", "ErrorCount", - "RouteCount" + "DistanceUnit" ], "members":{ "DataSource":{ "shape":"String", "documentation":"

The data provider of traffic and road network data used to calculate the routes. Indicates one of the available providers:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The unit of measurement for route distances.

" + "RouteCount":{ + "shape":"CalculateRouteMatrixSummaryRouteCountInteger", + "documentation":"

The count of cells in the route matrix. Equal to the number of DeparturePositions multiplied by the number of DestinationPositions.

" }, "ErrorCount":{ "shape":"CalculateRouteMatrixSummaryErrorCountInteger", "documentation":"

The count of error results in the route matrix. If this number is 0, all routes were calculated successfully.

" }, - "RouteCount":{ - "shape":"CalculateRouteMatrixSummaryRouteCountInteger", - "documentation":"

The count of cells in the route matrix. Equal to the number of DeparturePositions multiplied by the number of DestinationPositions.

" + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The unit of measurement for route distances.

" } }, "documentation":"

A summary of the calculated route matrix.

" @@ -1869,35 +1913,35 @@ "DestinationPosition" ], "members":{ - "ArrivalTime":{ - "shape":"Timestamp", - "documentation":"

Specifies the desired time of arrival. Uses the given time to calculate the route. Otherwise, the best time of day to travel with the best traffic conditions is used to calculate the route.

ArrivalTime is not supported Esri.

" - }, "CalculatorName":{ "shape":"ResourceName", "documentation":"

The name of the route calculator resource that you want to use to calculate the route.

", "location":"uri", "locationName":"CalculatorName" }, - "CarModeOptions":{ - "shape":"CalculateRouteCarModeOptions", - "documentation":"

Specifies route preferences when traveling by Car, such as avoiding routes that use ferries or tolls.

Requirements: TravelMode must be specified as Car.

" - }, - "DepartNow":{ - "shape":"Boolean", - "documentation":"

Sets the time of departure as the current time. Uses the current time to calculate a route. Otherwise, the best time of day to travel with the best traffic conditions is used to calculate the route.

Default Value: false

Valid Values: false | true

" - }, "DeparturePosition":{ "shape":"Position", "documentation":"

The start position for the route. Defined in World Geodetic System (WGS 84) format: [longitude, latitude].

  • For example, [-123.115, 49.285]

If you specify a departure that's not located on a road, Amazon Location moves the position to the nearest road. If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error.

Valid Values: [-180 to 180,-90 to 90]

" }, + "DestinationPosition":{ + "shape":"Position", + "documentation":"

The finish position for the route. Defined in World Geodetic System (WGS 84) format: [longitude, latitude].

  • For example, [-122.339, 47.615]

If you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road.

Valid Values: [-180 to 180,-90 to 90]

" + }, + "WaypointPositions":{ + "shape":"CalculateRouteRequestWaypointPositionsList", + "documentation":"

Specifies an ordered list of up to 23 intermediate positions to include along a route between the departure position and destination position.

  • For example, from the DeparturePosition [-123.115, 49.285], the route follows the order that the waypoint positions are given [[-122.757, 49.0021],[-122.349, 47.620]]

If you specify a waypoint position that's not located on a road, Amazon Location moves the position to the nearest road.

Specifying more than 23 waypoints returns a 400 ValidationException error.

If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error.

Valid Values: [-180 to 180,-90 to 90]

" + }, + "TravelMode":{ + "shape":"TravelMode", + "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. You can choose Car, Truck, Walking, Bicycle or Motorcycle as options for the TravelMode.

Bicycle and Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more details on using Grab for routing, including areas of coverage, see GrabMaps in the Amazon Location Service Developer Guide.

The TravelMode you specify also determines how you specify route preferences:

  • If traveling by Car use the CarModeOptions parameter.

  • If traveling by Truck use the TruckModeOptions parameter.

Default Value: Car

" + }, "DepartureTime":{ "shape":"Timestamp", "documentation":"

Specifies the desired time of departure. Uses the given time to calculate the route. Otherwise, the best time of day to travel with the best traffic conditions is used to calculate the route.

  • In ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. For example, 2020-07-02T12:15:20.000Z+01:00

" }, - "DestinationPosition":{ - "shape":"Position", - "documentation":"

The finish position for the route. Defined in World Geodetic System (WGS 84) format: [longitude, latitude].

  • For example, [-122.339, 47.615]

If you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road.

Valid Values: [-180 to 180,-90 to 90]

" + "DepartNow":{ + "shape":"Boolean", + "documentation":"

Sets the time of departure as the current time. Uses the current time to calculate a route. Otherwise, the best time of day to travel with the best traffic conditions is used to calculate the route.

Default Value: false

Valid Values: false | true

" }, "DistanceUnit":{ "shape":"DistanceUnit", @@ -1907,27 +1951,27 @@ "shape":"Boolean", "documentation":"

Set to include the geometry details in the result for each path between a pair of positions.

Default Value: false

Valid Values: false | true

" }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" - }, - "OptimizeFor":{ - "shape":"OptimizationMode", - "documentation":"

Specifies the distance to optimize for when calculating a route.

" - }, - "TravelMode":{ - "shape":"TravelMode", - "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. You can choose Car, Truck, Walking, Bicycle or Motorcycle as options for the TravelMode.

Bicycle and Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more details on using Grab for routing, including areas of coverage, see GrabMaps in the Amazon Location Service Developer Guide.

The TravelMode you specify also determines how you specify route preferences:

  • If traveling by Car use the CarModeOptions parameter.

  • If traveling by Truck use the TruckModeOptions parameter.

Default Value: Car

" + "CarModeOptions":{ + "shape":"CalculateRouteCarModeOptions", + "documentation":"

Specifies route preferences when traveling by Car, such as avoiding routes that use ferries or tolls.

Requirements: TravelMode must be specified as Car.

" }, "TruckModeOptions":{ "shape":"CalculateRouteTruckModeOptions", "documentation":"

Specifies route preferences when traveling by Truck, such as avoiding routes that use ferries or tolls, and truck specifications to consider when choosing an optimal road.

Requirements: TravelMode must be specified as Truck.

" }, - "WaypointPositions":{ - "shape":"CalculateRouteRequestWaypointPositionsList", - "documentation":"

Specifies an ordered list of up to 23 intermediate positions to include along a route between the departure position and destination position.

  • For example, from the DeparturePosition [-123.115, 49.285], the route follows the order that the waypoint positions are given [[-122.757, 49.0021],[-122.349, 47.620]]

If you specify a waypoint position that's not located on a road, Amazon Location moves the position to the nearest road.

Specifying more than 23 waypoints returns a 400 ValidationException error.

If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error.

Valid Values: [-180 to 180,-90 to 90]

" + "ArrivalTime":{ + "shape":"Timestamp", + "documentation":"

Specifies the desired time of arrival. Uses the given time to calculate the route. Otherwise, the best time of day to travel with the best traffic conditions is used to calculate the route.

ArrivalTime is not supported by Esri.

" + }, + "OptimizeFor":{ + "shape":"OptimizationMode", + "documentation":"

Specifies the distance to optimize for when calculating a route.

" + }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, @@ -1958,32 +2002,32 @@ "CalculateRouteSummary":{ "type":"structure", "required":[ + "RouteBBox", "DataSource", "Distance", - "DistanceUnit", "DurationSeconds", - "RouteBBox" + "DistanceUnit" ], "members":{ - "DataSource":{ - "shape":"String", + "RouteBBox":{ + "shape":"BoundingBox", + "documentation":"

Specifies a geographical box surrounding a route. Used to zoom into a route when displaying it in a map. For example, [min x, min y, max x, max y].

The first 2 bbox parameters describe the lower southwest corner:

  • The first bbox position is the X coordinate or longitude of the lower southwest corner.

  • The second bbox position is the Y coordinate or latitude of the lower southwest corner.

The next 2 bbox parameters describe the upper northeast corner:

  • The third bbox position is the X coordinate, or longitude of the upper northeast corner.

  • The fourth bbox position is the Y coordinate, or latitude of the upper northeast corner.

" + }, + "DataSource":{ + "shape":"String", "documentation":"

The data provider of traffic and road network data used to calculate the route. Indicates one of the available providers:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" }, "Distance":{ "shape":"CalculateRouteSummaryDistanceDouble", "documentation":"

The total distance covered by the route. The sum of the distance travelled between every stop on the route.

If Esri is the data source for the route calculator, the route distance can’t be greater than 400 km. If the route exceeds 400 km, the response is a 400 RoutesValidationException error.

" }, - "DistanceUnit":{ - "shape":"DistanceUnit", - "documentation":"

The unit of measurement for route distances.

" - }, "DurationSeconds":{ "shape":"CalculateRouteSummaryDurationSecondsDouble", "documentation":"

The total travel time for the route measured in seconds. The sum of the travel time between every stop on the route.

" }, - "RouteBBox":{ - "shape":"BoundingBox", - "documentation":"

Specifies a geographical box surrounding a route. Used to zoom into a route when displaying it in a map. For example, [min x, min y, max x, max y].

The first 2 bbox parameters describe the lower southwest corner:

  • The first bbox position is the X coordinate or longitude of the lower southwest corner.

  • The second bbox position is the Y coordinate or latitude of the lower southwest corner.

The next 2 bbox parameters describe the upper northeast corner:

  • The third bbox position is the X coordinate, or longitude of the upper northeast corner.

  • The fourth bbox position is the Y coordinate, or latitude of the upper northeast corner.

" + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The unit of measurement for route distances.

" } }, "documentation":"

A summary of the calculated route.

" @@ -2020,6 +2064,23 @@ }, "documentation":"

Contains details about additional route preferences for requests that specify TravelMode as Truck.

" }, + "CellSignals":{ + "type":"structure", + "required":["LteCellDetails"], + "members":{ + "LteCellDetails":{ + "shape":"CellSignalsLteCellDetailsList", + "documentation":"

Information about the Long-Term Evolution (LTE) network the device is connected to.

" + } + }, + "documentation":"

The cellular network communication infrastructure that the device uses.

" + }, + "CellSignalsLteCellDetailsList":{ + "type":"list", + "member":{"shape":"LteCellDetails"}, + "max":16, + "min":1 + }, "Circle":{ "type":"structure", "required":[ @@ -2055,25 +2116,21 @@ }, "exception":true }, - "CountryCode":{ - "type":"string", - "pattern":"^[A-Z]{3}$" - }, "CountryCode3":{ "type":"string", "max":3, "min":3, - "pattern":"^[A-Z]{3}$" + "pattern":"[A-Z]{3}" }, "CountryCode3OrEmpty":{ "type":"string", "max":3, "min":0, - "pattern":"^[A-Z]{3}$|^$" + "pattern":"[A-Z]{3}$|^" }, "CountryCodeList":{ "type":"list", - "member":{"shape":"CountryCode"}, + "member":{"shape":"CountryCode3"}, "max":100, "min":1 }, @@ -2085,14 +2142,6 @@ "shape":"ResourceName", "documentation":"

A custom name for the geofence collection.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), periods (.), and underscores (_).

  • Must be a unique geofence collection name.

  • No spaces allowed. For example, ExampleGeofenceCollection.

" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

An optional description for the geofence collection.

" - }, - "KmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", @@ -2105,28 +2154,36 @@ "deprecated":true, "deprecatedMessage":"Deprecated. No longer allowed." }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

An optional description for the geofence collection.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the geofence collection. A tag is a key-value pair that helps manage, identify, search, and filter your resources by labelling them.

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource

  • Each resource tag must be unique with a maximum of one value.

  • Maximum key length: 128 Unicode characters in UTF-8

  • Maximum value length: 256 Unicode characters in UTF-8

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @.

  • Cannot use \"aws:\" as a prefix for a key.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.

" } } }, "CreateGeofenceCollectionResponse":{ "type":"structure", "required":[ - "CollectionArn", "CollectionName", + "CollectionArn", "CreateTime" ], "members":{ - "CollectionArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" - }, "CollectionName":{ "shape":"ResourceName", "documentation":"

The name for the geofence collection.

" }, + "CollectionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" + }, "CreateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" @@ -2140,6 +2197,14 @@ "Restrictions" ], "members":{ + "KeyName":{ + "shape":"ResourceName", + "documentation":"

A custom name for the API key resource.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), periods (.), and underscores (_).

  • Must be a unique API key name.

  • No spaces allowed. For example, ExampleAPIKey.

" + }, + "Restrictions":{ + "shape":"ApiKeyRestrictions", + "documentation":"

The API key restrictions for the API key resource.

" + }, "Description":{ "shape":"ResourceDescription", "documentation":"

An optional description for the API key resource.

" @@ -2148,18 +2213,10 @@ "shape":"Timestamp", "documentation":"

The optional timestamp for when the API key resource will expire in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. One of NoExpiry or ExpireTime must be set.

" }, - "KeyName":{ - "shape":"ResourceName", - "documentation":"

A custom name for the API key resource.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), periods (.), and underscores (_).

  • Must be a unique API key name.

  • No spaces allowed. For example, ExampleAPIKey.

" - }, "NoExpiry":{ "shape":"Boolean", "documentation":"

Optionally set to true to set no expiration time for the API key. One of NoExpiry or ExpireTime must be set.

" }, - "Restrictions":{ - "shape":"ApiKeyRestrictions", - "documentation":"

The API key restrictions for the API key resource.

" - }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the map resource. A tag is a key-value pair that helps manage, identify, search, and filter your resources by labelling them.

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource

  • Each resource tag must be unique with a maximum of one value.

  • Maximum key length: 128 Unicode characters in UTF-8

  • Maximum value length: 256 Unicode characters in UTF-8

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @.

  • Cannot use \"aws:\" as a prefix for a key.

" @@ -2169,16 +2226,12 @@ "CreateKeyResponse":{ "type":"structure", "required":[ - "CreateTime", "Key", "KeyArn", - "KeyName" + "KeyName", + "CreateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the API key resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, "Key":{ "shape":"ApiKey", "documentation":"

The key value/string of an API key. This value is used when making API calls to authorize the call. For example, see GetMapGlyphs.

" @@ -2190,34 +2243,38 @@ "KeyName":{ "shape":"ResourceName", "documentation":"

The name of the API key resource.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the API key resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, "CreateMapRequest":{ "type":"structure", "required":[ - "Configuration", - "MapName" + "MapName", + "Configuration" ], "members":{ - "Configuration":{ - "shape":"MapConfiguration", - "documentation":"

Specifies the MapConfiguration, including the map style, for the map resource that you create. The map style defines the look of maps and the data provider for your map resource.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

An optional description for the map resource.

" - }, "MapName":{ "shape":"ResourceName", "documentation":"

The name for the map resource.

Requirements:

  • Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), periods (.), and underscores (_).

  • Must be a unique map resource name.

  • No spaces allowed. For example, ExampleMap.

" }, + "Configuration":{ + "shape":"MapConfiguration", + "documentation":"

Specifies the MapConfiguration, including the map style, for the map resource that you create. The map style defines the look of maps and the data provider for your map resource.

" + }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

An optional description for the map resource.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the map resource. A tag is a key-value pair that helps manage, identify, search, and filter your resources by labelling them.

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource

  • Each resource tag must be unique with a maximum of one value.

  • Maximum key length: 128 Unicode characters in UTF-8

  • Maximum value length: 256 Unicode characters in UTF-8

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @.

  • Cannot use \"aws:\" as a prefix for a key.

" @@ -2227,54 +2284,54 @@ "CreateMapResponse":{ "type":"structure", "required":[ - "CreateTime", + "MapName", "MapArn", - "MapName" + "CreateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "MapName":{ + "shape":"ResourceName", + "documentation":"

The name of the map resource.

" }, "MapArn":{ "shape":"GeoArn", "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:map/ExampleMap

" }, - "MapName":{ - "shape":"ResourceName", - "documentation":"

The name of the map resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, "CreatePlaceIndexRequest":{ "type":"structure", "required":[ - "DataSource", - "IndexName" + "IndexName", + "DataSource" ], "members":{ - "DataSource":{ - "shape":"String", - "documentation":"

Specifies the geospatial data provider for the new place index.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid values include:

For additional information, see Data providers on the Amazon Location Service Developer Guide.

" - }, - "DataSourceConfiguration":{ - "shape":"DataSourceConfiguration", - "documentation":"

Specifies the data storage option requesting Places.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description for the place index resource.

" - }, "IndexName":{ "shape":"ResourceName", "documentation":"

The name of the place index resource.

Requirements:

  • Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), periods (.), and underscores (_).

  • Must be a unique place index resource name.

  • No spaces allowed. For example, ExamplePlaceIndex.

" }, + "DataSource":{ + "shape":"String", + "documentation":"

Specifies the geospatial data provider for the new place index.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid values include:

For additional information, see Data providers on the Amazon Location Service Developer Guide.

" + }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description for the place index resource.

" + }, + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

Specifies the data storage option requesting Places.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the place index resource. A tag is a key-value pair that helps you manage, identify, search, and filter your resources.

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource.

  • Each tag key must be unique and must have exactly one associated value.

  • Maximum key length: 128 Unicode characters in UTF-8.

  • Maximum value length: 256 Unicode characters in UTF-8.

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @

  • Cannot use \"aws:\" as a prefix for a key.

" @@ -2284,22 +2341,22 @@ "CreatePlaceIndexResponse":{ "type":"structure", "required":[ - "CreateTime", + "IndexName", "IndexArn", - "IndexName" + "CreateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name for the place index resource.

" }, "IndexArn":{ "shape":"GeoArn", "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" }, - "IndexName":{ - "shape":"ResourceName", - "documentation":"

The name for the place index resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, @@ -2318,16 +2375,16 @@ "shape":"String", "documentation":"

Specifies the data provider of traffic and road network data.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid values include:

For additional information, see Data providers on the Amazon Location Service Developer Guide.

" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description for the route calculator resource.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description for the route calculator resource.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the route calculator resource. A tag is a key-value pair that helps manage, identify, search, and filter your resources by labelling them.

  • For example: { \"tag1\" : \"value1\", \"tag2\" : \"value2\"}

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource

  • Each resource tag must be unique with a maximum of one value.

  • Maximum key length: 128 Unicode characters in UTF-8

  • Maximum value length: 256 Unicode characters in UTF-8

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @.

  • Cannot use \"aws:\" as a prefix for a key.

" @@ -2337,19 +2394,19 @@ "CreateRouteCalculatorResponse":{ "type":"structure", "required":[ - "CalculatorArn", "CalculatorName", + "CalculatorArn", "CreateTime" ], "members":{ - "CalculatorArn":{ - "shape":"GeoArn", - "documentation":"

The Amazon Resource Name (ARN) for the route calculator resource. Use the ARN when you specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

" - }, "CalculatorName":{ "shape":"ResourceName", "documentation":"

The name of the route calculator resource.

  • For example, ExampleRouteCalculator.

" }, + "CalculatorArn":{ + "shape":"GeoArn", + "documentation":"

The Amazon Resource Name (ARN) for the route calculator resource. Use the ARN when you specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

" + }, "CreateTime":{ "shape":"Timestamp", "documentation":"

The timestamp when the route calculator resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" @@ -2360,25 +2417,9 @@ "type":"structure", "required":["TrackerName"], "members":{ - "Description":{ - "shape":"ResourceDescription", - "documentation":"

An optional description for the tracker resource.

" - }, - "EventBridgeEnabled":{ - "shape":"Boolean", - "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" - }, - "KmsKeyEnableGeospatialQueries":{ - "shape":"Boolean", - "documentation":"

Enables GeospatialQueries for a tracker that uses an Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

If you wish to encrypt your data using your own KMS customer managed key, then the Bounding Polygon Queries feature will be disabled by default. This is because by using this feature, a representation of your device positions will not be encrypted using your KMS managed key. The exact device position, however, is still encrypted using your managed key.

You can choose to opt in to the Bounding Polygon Queries feature. This is done by setting the KmsKeyEnableGeospatialQueries parameter to true when creating or updating a Tracker.

" - }, - "KmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.

" - }, - "PositionFiltering":{ - "shape":"PositionFiltering", - "documentation":"

Specifies the position filtering for the tracker resource.

Valid values:

  • TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 seconds, only one update per 30 seconds is stored for each unique device ID.

  • DistanceBased - If the device has moved less than 30 m (98.4 ft), location updates are ignored. Location updates within this area are neither evaluated against linked geofence collections, nor stored. This helps control costs by reducing the number of geofence evaluations and historical device positions to paginate through. Distance-based filtering can also reduce the effects of GPS noise when displaying device trajectories on a map.

  • AccuracyBased - If the device has moved less than the measured accuracy, location updates are ignored. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is ignored if the device has moved less than 15 m. Ignored location updates are neither evaluated against linked geofence collections, nor stored. This can reduce the effects of GPS noise when displaying device trajectories on a map, and can help control your costs by reducing the number of geofence evaluations.

This field is optional. If not specified, the default value is TimeBased.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name for the tracker resource.

Requirements:

  • Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-), periods (.), and underscores (_).

  • Must be a unique tracker resource name.

  • No spaces allowed. For example, ExampleTracker.

" }, "PricingPlan":{ "shape":"PricingPlan", @@ -2386,41 +2427,57 @@ "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.

" + }, "PricingPlanDataSource":{ "shape":"String", "documentation":"

This parameter is no longer used.

", "deprecated":true, "deprecatedMessage":"Deprecated. No longer allowed." }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

An optional description for the tracker resource.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Applies one or more tags to the tracker resource. A tag is a key-value pair that helps manage, identify, search, and filter your resources by labelling them.

Format: \"key\" : \"value\"

Restrictions:

  • Maximum 50 tags per resource

  • Each resource tag must be unique with a maximum of one value.

  • Maximum key length: 128 Unicode characters in UTF-8

  • Maximum value length: 256 Unicode characters in UTF-8

  • Can use alphanumeric characters (A–Z, a–z, 0–9), and the following characters: + - = . _ : / @.

  • Cannot use \"aws:\" as a prefix for a key.

" }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The name for the tracker resource.

Requirements:

  • Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-), periods (.), and underscores (_).

  • Must be a unique tracker resource name.

  • No spaces allowed. For example, ExampleTracker.

" + "PositionFiltering":{ + "shape":"PositionFiltering", + "documentation":"

Specifies the position filtering for the tracker resource.

Valid values:

  • TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 seconds, only one update per 30 seconds is stored for each unique device ID.

  • DistanceBased - If the device has moved less than 30 m (98.4 ft), location updates are ignored. Location updates within this area are neither evaluated against linked geofence collections, nor stored. This helps control costs by reducing the number of geofence evaluations and historical device positions to paginate through. Distance-based filtering can also reduce the effects of GPS noise when displaying device trajectories on a map.

  • AccuracyBased - If the device has moved less than the measured accuracy, location updates are ignored. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is ignored if the device has moved less than 15 m. Ignored location updates are neither evaluated against linked geofence collections, nor stored. This can reduce the effects of GPS noise when displaying device trajectories on a map, and can help control your costs by reducing the number of geofence evaluations.

This field is optional. If not specified, the default value is TimeBased.

" + }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" + }, + "KmsKeyEnableGeospatialQueries":{ + "shape":"Boolean", + "documentation":"

Enables GeospatialQueries for a tracker that uses an Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

If you wish to encrypt your data using your own KMS customer managed key, then the Bounding Polygon Queries feature will be disabled by default. This is because by using this feature, a representation of your device positions will not be encrypted using your KMS managed key. The exact device position, however, is still encrypted using your managed key.

You can choose to opt in to the Bounding Polygon Queries feature. This is done by setting the KmsKeyEnableGeospatialQueries parameter to true when creating or updating a Tracker.

" } } }, "CreateTrackerResponse":{ "type":"structure", "required":[ - "CreateTime", + "TrackerName", "TrackerArn", - "TrackerName" + "CreateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource.

" }, "TrackerArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) for the tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The name of the tracker resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, @@ -2428,7 +2485,7 @@ "type":"string", "max":100, "min":1, - "pattern":"^[-._\\w]+$" + "pattern":"[-._\\w]+" }, "CustomLayerList":{ "type":"list", @@ -2467,17 +2524,17 @@ "type":"structure", "required":["KeyName"], "members":{ - "ForceDelete":{ - "shape":"Boolean", - "documentation":"

ForceDelete bypasses an API key's expiry conditions and deletes the key. Set the parameter true to delete the key or to false to not preemptively delete the API key.

Valid values: true, or false.

Required: No

This action is irreversible. Only use ForceDelete if you are certain the key is no longer in use.

", - "location":"querystring", - "locationName":"forceDelete" - }, "KeyName":{ "shape":"ResourceName", "documentation":"

The name of the API key to delete.

", "location":"uri", "locationName":"KeyName" + }, + "ForceDelete":{ + "shape":"Boolean", + "documentation":"

ForceDelete bypasses an API key's expiry conditions and deletes the key. Set the parameter true to delete the key or to false to not preemptively delete the API key.

Valid values: true, or false.

Required: No

This action is irreversible. Only use ForceDelete if you are certain the key is no longer in use.

", + "location":"querystring", + "locationName":"forceDelete" } } }, @@ -2569,37 +2626,25 @@ "DescribeGeofenceCollectionResponse":{ "type":"structure", "required":[ - "CollectionArn", "CollectionName", - "CreateTime", + "CollectionArn", "Description", + "CreateTime", "UpdateTime" ], "members":{ - "CollectionArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" - }, "CollectionName":{ "shape":"ResourceName", "documentation":"

The name of the geofence collection.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, - "Description":{ + "CollectionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" + }, + "Description":{ "shape":"ResourceDescription", "documentation":"

The optional description for the geofence collection.

" }, - "GeofenceCount":{ - "shape":"DescribeGeofenceCollectionResponseGeofenceCountInteger", - "documentation":"

The number of geofences in the geofence collection.

" - }, - "KmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. Always returns RequestBasedUsage.

", @@ -2612,13 +2657,25 @@ "deprecated":true, "deprecatedMessage":"Deprecated. Unused." }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Displays the key, value pairs of tags associated with this resource.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "GeofenceCount":{ + "shape":"DescribeGeofenceCollectionResponseGeofenceCountInteger", + "documentation":"

The number of geofences in the geofence collection.

" } } }, @@ -2642,27 +2699,15 @@ "DescribeKeyResponse":{ "type":"structure", "required":[ - "CreateTime", - "ExpireTime", "Key", "KeyArn", "KeyName", "Restrictions", + "CreateTime", + "ExpireTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the API key resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description for the API key resource.

" - }, - "ExpireTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the API key resource will expire in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, "Key":{ "shape":"ApiKey", "documentation":"

The key value/string of an API key.

" @@ -2676,13 +2721,25 @@ "documentation":"

The name of the API key resource.

" }, "Restrictions":{"shape":"ApiKeyRestrictions"}, - "Tags":{ - "shape":"TagMap", - "documentation":"

Tags associated with the API key resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the API key resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "ExpireTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the API key resource will expire in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the API key resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description for the API key resource.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Tags associated with the API key resource.

" } } }, @@ -2701,49 +2758,49 @@ "DescribeMapResponse":{ "type":"structure", "required":[ - "Configuration", - "CreateTime", + "MapName", + "MapArn", "DataSource", + "Configuration", "Description", - "MapArn", - "MapName", + "CreateTime", "UpdateTime" ], "members":{ - "Configuration":{ - "shape":"MapConfiguration", - "documentation":"

Specifies the map tile style selected from a partner data provider.

" - }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "DataSource":{ - "shape":"String", - "documentation":"

Specifies the data provider for the associated map tiles.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description for the map resource.

" + "MapName":{ + "shape":"ResourceName", + "documentation":"

The map style selected from an available provider.

" }, "MapArn":{ "shape":"GeoArn", "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:map/ExampleMap

" }, - "MapName":{ - "shape":"ResourceName", - "documentation":"

The map style selected from an available provider.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. Always returns RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." }, + "DataSource":{ + "shape":"String", + "documentation":"

Specifies the data provider for the associated map tiles.

" + }, + "Configuration":{ + "shape":"MapConfiguration", + "documentation":"

Specifies the map tile style selected from a partner data provider.

" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description for the map resource.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

Tags associated with the map resource.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the map resource was last update in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -2765,52 +2822,52 @@ "DescribePlaceIndexResponse":{ "type":"structure", "required":[ + "IndexName", + "IndexArn", + "Description", "CreateTime", + "UpdateTime", "DataSource", - "DataSourceConfiguration", - "Description", - "IndexArn", - "IndexName", - "UpdateTime" + "DataSourceConfiguration" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "DataSource":{ - "shape":"String", - "documentation":"

The data provider of geospatial data. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" - }, - "DataSourceConfiguration":{ - "shape":"DataSourceConfiguration", - "documentation":"

The specified data storage option for requesting Places.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description for the place index resource.

" + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the place index resource being described.

" }, "IndexArn":{ "shape":"GeoArn", "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" }, - "IndexName":{ - "shape":"ResourceName", - "documentation":"

The name of the place index resource being described.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. Always returns RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." }, - "Tags":{ - "shape":"TagMap", - "documentation":"

Tags associated with place index resource.

" + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description for the place index resource.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "DataSource":{ + "shape":"String", + "documentation":"

The data provider of geospatial data. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" + }, + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

The specified data storage option for requesting Places.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Tags associated with place index resource.

" } } }, @@ -2829,47 +2886,47 @@ "DescribeRouteCalculatorResponse":{ "type":"structure", "required":[ - "CalculatorArn", "CalculatorName", - "CreateTime", - "DataSource", + "CalculatorArn", "Description", - "UpdateTime" + "CreateTime", + "UpdateTime", + "DataSource" ], "members":{ + "CalculatorName":{ + "shape":"ResourceName", + "documentation":"

The name of the route calculator resource being described.

" + }, "CalculatorArn":{ "shape":"GeoArn", "documentation":"

The Amazon Resource Name (ARN) for the Route calculator resource. Use the ARN when you specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

" }, - "CalculatorName":{ - "shape":"ResourceName", - "documentation":"

The name of the route calculator resource being described.

" + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Always returns RequestBasedUsage.

", + "deprecated":true, + "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description of the route calculator resource.

" }, "CreateTime":{ "shape":"Timestamp", "documentation":"

The timestamp when the route calculator resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the route calculator resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" + }, "DataSource":{ "shape":"String", "documentation":"

The data provider of traffic and road network data. Indicates one of the available providers:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description of the route calculator resource.

" - }, - "PricingPlan":{ - "shape":"PricingPlan", - "documentation":"

Always returns RequestBasedUsage.

", - "deprecated":true, - "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." - }, "Tags":{ "shape":"TagMap", "documentation":"

Tags associated with route calculator resource.

" - }, - "UpdateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp when the route calculator resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" } } }, @@ -2888,37 +2945,25 @@ "DescribeTrackerResponse":{ "type":"structure", "required":[ - "CreateTime", - "Description", - "TrackerArn", "TrackerName", + "TrackerArn", + "Description", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource.

" + }, + "TrackerArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" }, "Description":{ "shape":"ResourceDescription", "documentation":"

The optional description for the tracker resource.

" }, - "EventBridgeEnabled":{ - "shape":"Boolean", - "documentation":"

Whether UPDATE events from this tracker in EventBridge are enabled. If set to true these events will be sent to EventBridge.

" - }, - "KmsKeyEnableGeospatialQueries":{ - "shape":"Boolean", - "documentation":"

Enables GeospatialQueries for a tracker that uses a Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

If you wish to encrypt your data using your own KMS customer managed key, then the Bounding Polygon Queries feature will be disabled by default. This is because by using this feature, a representation of your device positions will not be encrypted using the your KMS managed key. The exact device position, however; is still encrypted using your managed key.

You can choose to opt-in to the Bounding Polygon Quseries feature. This is done by setting the KmsKeyEnableGeospatialQueries parameter to true when creating or updating a Tracker.

" - }, - "KmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource.

" - }, - "PositionFiltering":{ - "shape":"PositionFiltering", - "documentation":"

The position filtering method of the tracker resource.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

Always returns RequestBasedUsage.

", @@ -2935,51 +2980,63 @@ "shape":"TagMap", "documentation":"

The tags associated with the tracker resource.

" }, - "TrackerArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" - }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The name of the tracker resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the tracker resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource.

" + }, + "PositionFiltering":{ + "shape":"PositionFiltering", + "documentation":"

The position filtering method of the tracker resource.

" + }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether UPDATE events from this tracker in EventBridge are enabled. If set to true these events will be sent to EventBridge.

" + }, + "KmsKeyEnableGeospatialQueries":{ + "shape":"Boolean", + "documentation":"

Enables GeospatialQueries for a tracker that uses an Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

If you wish to encrypt your data using your own KMS customer managed key, then the Bounding Polygon Queries feature will be disabled by default. This is because by using this feature, a representation of your device positions will not be encrypted using your KMS managed key. The exact device position, however, is still encrypted using your managed key.

You can choose to opt in to the Bounding Polygon Queries feature. This is done by setting the KmsKeyEnableGeospatialQueries parameter to true when creating or updating a Tracker.

" } } }, "DevicePosition":{ "type":"structure", "required":[ - "Position", + "SampleTime", "ReceivedTime", - "SampleTime" + "Position" ], "members":{ - "Accuracy":{ - "shape":"PositionalAccuracy", - "documentation":"

The accuracy of the device position.

" - }, "DeviceId":{ "shape":"Id", "documentation":"

The device whose position you retrieved.

" }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "ReceivedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "Position":{ "shape":"Position", "documentation":"

The last known device position.

" }, + "Accuracy":{ + "shape":"PositionalAccuracy", + "documentation":"

The accuracy of the device position.

" + }, "PositionProperties":{ "shape":"PropertyMap", "documentation":"

The properties associated with the position.

" - }, - "ReceivedTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "SampleTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } }, "documentation":"

Contains the device position details.

" @@ -2992,32 +3049,72 @@ "type":"structure", "required":[ "DeviceId", - "Position", - "SampleTime" + "SampleTime", + "Position" ], "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

The device associated to the position update.

" + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "Position":{ + "shape":"Position", + "documentation":"

The latest device position defined in WGS 84 format: [X or longitude, Y or latitude].

" + }, "Accuracy":{ "shape":"PositionalAccuracy", "documentation":"

The accuracy of the device position.

" }, + "PositionProperties":{ + "shape":"PropertyMap", + "documentation":"

Associates one or more properties with the position update. A property is a key-value pair stored with the position update and added to any geofence event the update may trigger.

Format: \"key\" : \"value\"

" + } + }, + "documentation":"

Contains the position update details for a device.

" + }, + "DeviceState":{ + "type":"structure", + "required":[ + "DeviceId", + "SampleTime", + "Position" + ], + "members":{ "DeviceId":{ "shape":"Id", - "documentation":"

The device associated to the position update.

" + "documentation":"

The device identifier.

" + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "Position":{ "shape":"Position", - "documentation":"

The latest device position defined in WGS 84 format: [X or longitude, Y or latitude].

" + "documentation":"

The last known device position.

" }, - "PositionProperties":{ - "shape":"PropertyMap", - "documentation":"

Associates one of more properties with the position update. A property is a key-value pair stored with the position update and added to any geofence event the update may trigger.

Format: \"key\" : \"value\"

" + "Accuracy":{"shape":"PositionalAccuracy"}, + "Ipv4Address":{ + "shape":"DeviceStateIpv4AddressString", + "documentation":"

The device's Ipv4 address.

" }, - "SampleTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + "WiFiAccessPoints":{ + "shape":"WiFiAccessPointList", + "documentation":"

The Wi-Fi access points the device is using.

" + }, + "CellSignals":{ + "shape":"CellSignals", + "documentation":"

The cellular network infrastructure that the device is connected to.

" } }, - "documentation":"

Contains the position update details for a device.

" + "documentation":"

The device's position, IP address, and Wi-Fi access points.

" + }, + "DeviceStateIpv4AddressString":{ + "type":"string", + "pattern":"(?:(?:25[0-5]|(?:2[0-4]|1\\d|[0-9]|)\\d)\\.?\\b){4}" }, "DimensionUnit":{ "type":"string", @@ -3029,21 +3126,21 @@ "DisassociateTrackerConsumerRequest":{ "type":"structure", "required":[ - "ConsumerArn", - "TrackerName" + "TrackerName", + "ConsumerArn" ], "members":{ - "ConsumerArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the geofence collection to be disassociated from the tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer

", - "location":"uri", - "locationName":"ConsumerArn" - }, "TrackerName":{ "shape":"ResourceName", "documentation":"

The name of the tracker resource to be dissociated from the consumer.

", "location":"uri", "locationName":"TrackerName" + }, + "ConsumerArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the geofence collection to be disassociated from the tracker resource. Used when you need to specify a resource across all Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer

", + "location":"uri", + "locationName":"ConsumerArn" } } }, @@ -3063,53 +3160,220 @@ "type":"double", "box":true }, + "Earfcn":{ + "type":"integer", + "max":262143, + "min":0 + }, + "EutranCellId":{ + "type":"integer", + "max":268435455, + "min":0 + }, "FilterPlaceCategoryList":{ "type":"list", "member":{"shape":"PlaceCategory"}, "max":5, "min":1 }, + "ForecastGeofenceEventsDeviceState":{ + "type":"structure", + "required":["Position"], + "members":{ + "Position":{ + "shape":"Position", + "documentation":"

The device's position.

" + }, + "Speed":{ + "shape":"ForecastGeofenceEventsDeviceStateSpeedDouble", + "documentation":"

The device's speed.

" + } + }, + "documentation":"

The device's state, including current position and speed.

" + }, + "ForecastGeofenceEventsDeviceStateSpeedDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "ForecastGeofenceEventsRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "DeviceState" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

The name of the geofence collection.

", + "location":"uri", + "locationName":"CollectionName" + }, + "DeviceState":{ + "shape":"ForecastGeofenceEventsDeviceState", + "documentation":"

The device's state, including current position and speed.

" + }, + "TimeHorizonMinutes":{ + "shape":"ForecastGeofenceEventsRequestTimeHorizonMinutesDouble", + "documentation":"

Specifies the time horizon in minutes for the forecasted events.

" + }, + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The distance unit used for the NearestDistance property returned in a forecasted event. The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour.

Default Value: Kilometers

" + }, + "SpeedUnit":{ + "shape":"SpeedUnit", + "documentation":"

The speed unit for the device captured by the device state. The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour.

Default Value: KilometersPerHour.

" + }, + "NextToken":{ + "shape":"LargeToken", + "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" + }, + "MaxResults":{ + "shape":"ForecastGeofenceEventsRequestMaxResultsInteger", + "documentation":"

An optional limit for the number of resources returned in a single call.

Default value: 20

" + } + } + }, + "ForecastGeofenceEventsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "ForecastGeofenceEventsRequestTimeHorizonMinutesDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "ForecastGeofenceEventsResponse":{ + "type":"structure", + "required":[ + "ForecastedEvents", + "DistanceUnit", + "SpeedUnit" + ], + "members":{ + "ForecastedEvents":{ + "shape":"ForecastedEventsList", + "documentation":"

The list of forecasted events.

" + }, + "NextToken":{ + "shape":"LargeToken", + "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

" + }, + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The distance unit for the forecasted events.

" + }, + "SpeedUnit":{ + "shape":"SpeedUnit", + "documentation":"

The speed unit for the forecasted events.

" + } + } + }, + "ForecastedEvent":{ + "type":"structure", + "required":[ + "EventId", + "GeofenceId", + "IsDeviceInGeofence", + "NearestDistance", + "EventType" + ], + "members":{ + "EventId":{ + "shape":"Uuid", + "documentation":"

The forecasted event identifier.

" + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

The geofence identifier pertaining to the forecasted event.

" + }, + "IsDeviceInGeofence":{ + "shape":"Boolean", + "documentation":"

Indicates if the device is located within the geofence.

" + }, + "NearestDistance":{ + "shape":"NearestDistance", + "documentation":"

The closest distance from the device's position to the geofence.

" + }, + "EventType":{ + "shape":"ForecastedGeofenceEventType", + "documentation":"

The event type, forecasting three states for which a device can be in relative to a geofence:

ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

IDLE: If a device is inside of a geofence, and the device is not moving.

" + }, + "ForecastedBreachTime":{ + "shape":"Timestamp", + "documentation":"

The forecasted time the device will breach the geofence in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

The geofence properties.

" + } + }, + "documentation":"

A forecasted event represents a geofence event in relation to the requested device state that may occur given the provided device state and time horizon.

" + }, + "ForecastedEventsList":{ + "type":"list", + "member":{"shape":"ForecastedEvent"} + }, + "ForecastedGeofenceEventType":{ + "type":"string", + "enum":[ + "ENTER", + "EXIT", + "IDLE" + ] + }, "GeoArn":{ "type":"string", "max":1600, "min":0, - "pattern":"^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$" + "pattern":"arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))" + }, + "GeoArnV2":{ + "type":"string", + "max":1600, + "min":0, + "pattern":".*(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$)|(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):(geo-routes|geo-places|geo-maps)(:((\\*)|([a-z0-9]+([.-][a-z0-9]+)*)))::((provider[\\/][*-._\\w]+))$).*" }, "GeofenceGeometry":{ "type":"structure", "members":{ + "Polygon":{ + "shape":"LinearRings", + "documentation":"

A polygon is a list of linear rings which are each made up of a list of vertices.

Each vertex is a 2-dimensional point of the form: [longitude, latitude]. This is represented as an array of doubles of length 2 (so [double, double]).

An array of 4 or more vertices, where the first and last vertex are the same (to form a closed boundary), is called a linear ring. The linear ring vertices must be listed in counter-clockwise order around the ring’s interior. The linear ring is represented as an array of vertices, or an array of arrays of doubles ([[double, double], ...]).

A geofence consists of a single linear ring. To allow for future expansion, the Polygon parameter takes an array of linear rings, which is represented as an array of arrays of arrays of doubles ([[[double, double], ...], ...]).

A linear ring for use in geofences can consist of between 4 and 1,000 vertices.

" + }, "Circle":{ "shape":"Circle", "documentation":"

A circle on the earth, as defined by a center point and a radius.

" }, - "Polygon":{ - "shape":"LinearRings", - "documentation":"

A polygon is a list of linear rings which are each made up of a list of vertices.

Each vertex is a 2-dimensional point of the form: [longitude, latitude]. This is represented as an array of doubles of length 2 (so [double, double]).

An array of 4 or more vertices, where the first and last vertex are the same (to form a closed boundary), is called a linear ring. The linear ring vertices must be listed in counter-clockwise order around the ring’s interior. The linear ring is represented as an array of vertices, or an array of arrays of doubles ([[double, double], ...]).

A geofence consists of a single linear ring. To allow for future expansion, the Polygon parameter takes an array of linear rings, which is represented as an array of arrays of arrays of doubles ([[[double, double], ...], ...]).

A linear ring for use in geofences can consist of between 4 and 1,000 vertices.

" + "Geobuf":{ + "shape":"Base64EncodedGeobuf", + "documentation":"

Geobuf is a compact binary encoding for geographic data that provides lossless compression of GeoJSON polygons. The Geobuf must be Base64-encoded.

A polygon in Geobuf format can have up to 100,000 vertices.

" } }, - "documentation":"

Contains the geofence geometry details.

A geofence geometry is made up of either a polygon or a circle. Can be either a polygon or a circle. Including both will return a validation error.

Amazon Location doesn't currently support polygons with holes, multipolygons, polygons that are wound clockwise, or that cross the antimeridian.

" + "documentation":"

Contains the geofence geometry details.

A geofence geometry is made up of either a polygon or a circle. Can be a polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

Amazon Location doesn't currently support polygons with holes, multipolygons, polygons that are wound clockwise, or that cross the antimeridian.

" }, "GetDevicePositionHistoryRequest":{ "type":"structure", "required":[ - "DeviceId", - "TrackerName" + "TrackerName", + "DeviceId" ], "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The tracker resource receiving the request for the device position history.

", + "location":"uri", + "locationName":"TrackerName" + }, "DeviceId":{ "shape":"Id", "documentation":"

The device whose position history you want to retrieve.

", "location":"uri", "locationName":"DeviceId" }, - "EndTimeExclusive":{ - "shape":"Timestamp", - "documentation":"

Specify the end time for the position history in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. By default, the value will be the time that the request is made.

Requirement:

  • The time specified for EndTimeExclusive must be after the time for StartTimeInclusive.

" - }, - "MaxResults":{ - "shape":"GetDevicePositionHistoryRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of device positions returned in a single call.

Default value: 100

" - }, "NextToken":{ "shape":"Token", "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" @@ -3118,11 +3382,13 @@ "shape":"Timestamp", "documentation":"

Specify the start time for the position history in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. By default, the value will be 24 hours prior to the time that the request is made.

Requirement:

  • The time specified for StartTimeInclusive must be before EndTimeExclusive.

" }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The tracker resource receiving the request for the device position history.

", - "location":"uri", - "locationName":"TrackerName" + "EndTimeExclusive":{ + "shape":"Timestamp", + "documentation":"

Specify the end time for the position history in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. By default, the value will be the time that the request is made.

Requirement:

  • The time specified for EndTimeExclusive must be after the time for StartTimeInclusive.

" + }, + "MaxResults":{ + "shape":"GetDevicePositionHistoryRequestMaxResultsInteger", + "documentation":"

An optional limit for the number of device positions returned in a single call.

Default value: 100

" } } }, @@ -3149,55 +3415,55 @@ "GetDevicePositionRequest":{ "type":"structure", "required":[ - "DeviceId", - "TrackerName" + "TrackerName", + "DeviceId" ], "members":{ - "DeviceId":{ - "shape":"Id", - "documentation":"

The device whose position you want to retrieve.

", - "location":"uri", - "locationName":"DeviceId" - }, "TrackerName":{ "shape":"ResourceName", "documentation":"

The tracker resource receiving the position update.

", "location":"uri", "locationName":"TrackerName" + }, + "DeviceId":{ + "shape":"Id", + "documentation":"

The device whose position you want to retrieve.

", + "location":"uri", + "locationName":"DeviceId" } } }, "GetDevicePositionResponse":{ "type":"structure", "required":[ - "Position", + "SampleTime", "ReceivedTime", - "SampleTime" + "Position" ], "members":{ - "Accuracy":{ - "shape":"PositionalAccuracy", - "documentation":"

The accuracy of the device position.

" - }, "DeviceId":{ "shape":"Id", "documentation":"

The device whose position you retrieved.

" }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "ReceivedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource received the device position. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "Position":{ "shape":"Position", "documentation":"

The last known device position.

" }, + "Accuracy":{ + "shape":"PositionalAccuracy", + "documentation":"

The accuracy of the device position.

" + }, "PositionProperties":{ "shape":"PropertyMap", "documentation":"

The properties associated with the position.

" - }, - "ReceivedTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "SampleTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, @@ -3225,25 +3491,17 @@ "GetGeofenceResponse":{ "type":"structure", "required":[ - "CreateTime", "GeofenceId", "Geometry", "Status", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence identifier.

" }, - "GeofenceProperties":{ - "shape":"PropertyMap", - "documentation":"

User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" - }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

Contains the geofence geometry details describing a polygon or a circle.

" @@ -3252,23 +3510,37 @@ "shape":"String", "documentation":"

Identifies the state of the geofence. A geofence will hold one of the following states:

  • ACTIVE — The geofence has been indexed by the system.

  • PENDING — The geofence is being processed by the system.

  • FAILED — The geofence failed to be indexed by the system.

  • DELETED — The geofence has been deleted from the system index.

  • DELETING — The geofence is being deleted from the system index.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" } } }, "GetMapGlyphsRequest":{ "type":"structure", "required":[ + "MapName", "FontStack", - "FontUnicodeRange", - "MapName" + "FontUnicodeRange" ], "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

The map resource associated with the glyph file.

", + "location":"uri", + "locationName":"MapName" + }, "FontStack":{ "shape":"String", - "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

Valid font stacks for Esri styles:

  • VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold

  • VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold

  • VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic

  • VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold

  • VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold | Arial Unicode MS Bold | Arial Unicode MS Regular

Valid font stacks for HERE Technologies styles:

  • VectorHereContrast – Fira GO Regular | Fira GO Bold

  • VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular

Valid font stacks for GrabMaps styles:

  • VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold

Valid font stacks for Open Data styles:

  • VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold | Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold | Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold | Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular | Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular | Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium

The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

", + "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

Valid font stacks for Esri styles:

  • VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold

  • VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold

  • VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic

  • VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold

  • VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold

Valid font stacks for HERE Technologies styles:

  • VectorHereContrast – Fira GO Regular | Fira GO Bold

  • VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular

Valid font stacks for GrabMaps styles:

  • VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold

Valid font stacks for Open Data styles:

  • VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold | Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold | Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold | Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular | Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular | Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium

The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

", "location":"uri", "locationName":"FontStack" }, @@ -3283,18 +3555,12 @@ "documentation":"

The optional API key to authorize the request.

", "location":"querystring", "locationName":"key" - }, - "MapName":{ - "shape":"ResourceName", - "documentation":"

The map resource associated with the glyph file.

", - "location":"uri", - "locationName":"MapName" } } }, "GetMapGlyphsRequestFontUnicodeRangeString":{ "type":"string", - "pattern":"^[0-9]+-[0-9]+\\.pbf$" + "pattern":"[0-9]+-[0-9]+\\.pbf" }, "GetMapGlyphsResponse":{ "type":"structure", @@ -3303,17 +3569,17 @@ "shape":"Blob", "documentation":"

The glyph, as binary blob.

" }, - "CacheControl":{ - "shape":"String", - "documentation":"

The HTTP Cache-Control directive for the value.

", - "location":"header", - "locationName":"Cache-Control" - }, "ContentType":{ "shape":"String", "documentation":"

The map glyph content type. For example, application/octet-stream.

", "location":"header", "locationName":"Content-Type" + }, + "CacheControl":{ + "shape":"String", + "documentation":"

The HTTP Cache-Control directive for the value.

", + "location":"header", + "locationName":"Cache-Control" } }, "payload":"Blob" @@ -3321,33 +3587,33 @@ "GetMapSpritesRequest":{ "type":"structure", "required":[ - "FileName", - "MapName" + "MapName", + "FileName" ], "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

The map resource associated with the sprite file.

", + "location":"uri", + "locationName":"MapName" + }, "FileName":{ "shape":"GetMapSpritesRequestFileNameString", "documentation":"

The name of the sprite file. Use the following file names for the sprite sheet:

  • sprites.png

  • sprites@2x.png for high pixel density displays

For the JSON document containing image offsets. Use the following file names:

  • sprites.json

  • sprites@2x.json for high pixel density displays

", "location":"uri", "locationName":"FileName" }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" - }, - "MapName":{ - "shape":"ResourceName", - "documentation":"

The map resource associated with the sprite file.

", - "location":"uri", - "locationName":"MapName" + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, "GetMapSpritesRequestFileNameString":{ "type":"string", - "pattern":"^sprites(@2x)?\\.(png|json)$" + "pattern":"sprites(@2x)?\\.(png|json)" }, "GetMapSpritesResponse":{ "type":"structure", @@ -3356,17 +3622,17 @@ "shape":"Blob", "documentation":"

Contains the body of the sprite sheet or JSON offset file.

" }, - "CacheControl":{ - "shape":"String", - "documentation":"

The HTTP Cache-Control directive for the value.

", - "location":"header", - "locationName":"Cache-Control" - }, "ContentType":{ "shape":"String", "documentation":"

The content type of the sprite sheet and offsets. For example, the sprite sheet content type is image/png, and the sprite offset JSON document is application/json.

", "location":"header", "locationName":"Content-Type" + }, + "CacheControl":{ + "shape":"String", + "documentation":"

The HTTP Cache-Control directive for the value.

", + "location":"header", + "locationName":"Cache-Control" } }, "payload":"Blob" @@ -3375,17 +3641,17 @@ "type":"structure", "required":["MapName"], "members":{ - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" - }, "MapName":{ "shape":"ResourceName", "documentation":"

The map resource to retrieve the style descriptor from.

", "location":"uri", "locationName":"MapName" + }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, @@ -3396,17 +3662,17 @@ "shape":"Blob", "documentation":"

Contains the body of the style descriptor.

" }, - "CacheControl":{ - "shape":"String", - "documentation":"

The HTTP Cache-Control directive for the value.

", - "location":"header", - "locationName":"Cache-Control" - }, "ContentType":{ "shape":"String", "documentation":"

The style descriptor's content type. For example, application/json.

", "location":"header", "locationName":"Content-Type" + }, + "CacheControl":{ + "shape":"String", + "documentation":"

The HTTP Cache-Control directive for the value.

", + "location":"header", + "locationName":"Cache-Control" } }, "payload":"Blob" @@ -3415,23 +3681,23 @@ "type":"structure", "required":[ "MapName", + "Z", "X", - "Y", - "Z" + "Y" ], "members":{ - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" - }, "MapName":{ "shape":"ResourceName", "documentation":"

The map resource to retrieve the map tiles from.

", "location":"uri", "locationName":"MapName" }, + "Z":{ + "shape":"GetMapTileRequestZString", + "documentation":"

The zoom value for the map tile.

", + "location":"uri", + "locationName":"Z" + }, "X":{ "shape":"GetMapTileRequestXString", "documentation":"

The X axis value for the map tile.

", @@ -3444,25 +3710,25 @@ "location":"uri", "locationName":"Y" }, - "Z":{ - "shape":"GetMapTileRequestZString", - "documentation":"

The zoom value for the map tile.

", - "location":"uri", - "locationName":"Z" + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, "GetMapTileRequestXString":{ "type":"string", - "pattern":"\\d+" + "pattern":".*\\d+.*" }, "GetMapTileRequestYString":{ "type":"string", - "pattern":"\\d+" + "pattern":".*\\d+.*" }, "GetMapTileRequestZString":{ "type":"string", - "pattern":"\\d+" + "pattern":".*\\d+.*" }, "GetMapTileResponse":{ "type":"structure", @@ -3471,17 +3737,17 @@ "shape":"Blob", "documentation":"

Contains Mapbox Vector Tile (MVT) data.

" }, - "CacheControl":{ - "shape":"String", - "documentation":"

The HTTP Cache-Control directive for the value.

", - "location":"header", - "locationName":"Cache-Control" - }, "ContentType":{ "shape":"String", "documentation":"

The map tile's content type. For example, application/vnd.mapbox-vector-tile.

", "location":"header", "locationName":"Content-Type" + }, + "CacheControl":{ + "shape":"String", + "documentation":"

The HTTP Cache-Control directive for the value.

", + "location":"header", + "locationName":"Cache-Control" } }, "payload":"Blob" @@ -3499,11 +3765,11 @@ "location":"uri", "locationName":"IndexName" }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" + "PlaceId":{ + "shape":"PlaceId", + "documentation":"

The identifier of the place to find.

", + "location":"uri", + "locationName":"PlaceId" }, "Language":{ "shape":"LanguageTag", @@ -3511,11 +3777,11 @@ "location":"querystring", "locationName":"language" }, - "PlaceId":{ - "shape":"PlaceId", - "documentation":"

The identifier of the place to find.

While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider:

  • Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. Place IDs might also change between the typical quarterly change but that will be much less frequent.

  • HERE: We recommend that you cache data for no longer than a week to keep your data fresh. You can assume that less than 1% of IDs will shift from release to release, which is approximately 1 - 2 times per week.

  • Grab: Place IDs can expire or become invalid in the following situations.

    • Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth, such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis.

    • Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, and it will be marked as derived in the place.result_type field in the response. The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by Place ID from Place Details API. After 30 days, the interpolated POIs (both Place ID and details) may expire and become inaccessible from the Place Details API.

", - "location":"uri", - "locationName":"PlaceId" + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, @@ -3533,7 +3799,30 @@ "type":"string", "max":100, "min":1, - "pattern":"^[-._\\p{L}\\p{N}]+$" + "pattern":"[-._\\p{L}\\p{N}]+" + }, + "InferredState":{ + "type":"structure", + "required":["ProxyDetected"], + "members":{ + "Position":{ + "shape":"Position", + "documentation":"

The device position inferred by the provided position, IP address, cellular signals, and Wi-Fi access points.

" + }, + "Accuracy":{ + "shape":"PositionalAccuracy", + "documentation":"

The level of certainty of the inferred position.

" + }, + "DeviationDistance":{ + "shape":"Double", + "documentation":"

The distance between the inferred position and the device's self-reported position.

" + }, + "ProxyDetected":{ + "shape":"Boolean", + "documentation":"

Indicates if a proxy was used.

" + } + }, + "documentation":"

The inferred state of the device, given the provided position, IP address, cellular signals, and Wi-Fi access points.

" }, "Integer":{ "type":"integer", @@ -3571,16 +3860,29 @@ "max":35, "min":2 }, + "LargeToken":{ + "type":"string", + "max":60000, + "min":1 + }, "Leg":{ "type":"structure", "required":[ + "StartPosition", + "EndPosition", "Distance", "DurationSeconds", - "EndPosition", - "StartPosition", "Steps" ], "members":{ + "StartPosition":{ + "shape":"Position", + "documentation":"

The starting position of the leg. Follows the format [longitude,latitude].

If the StartPosition isn't located on a road, it's snapped to a nearby road.

" + }, + "EndPosition":{ + "shape":"Position", + "documentation":"

The terminating position of the leg. Follows the format [longitude,latitude].

If the EndPosition isn't located on a road, it's snapped to a nearby road.

" + }, "Distance":{ "shape":"LegDistanceDouble", "documentation":"

The distance between the leg's StartPosition and EndPosition along a calculated route.

  • The default measurement is Kilometers unless the request specifies a DistanceUnit of Miles.

" @@ -3589,18 +3891,10 @@ "shape":"LegDurationSecondsDouble", "documentation":"

The estimated travel time between the leg's StartPosition and EndPosition. The travel mode and departure time that you specify in the request determines the calculated time.

" }, - "EndPosition":{ - "shape":"Position", - "documentation":"

The terminating position of the leg. Follows the format [longitude,latitude].

If the EndPosition isn't located on a road, it's snapped to a nearby road.

" - }, "Geometry":{ "shape":"LegGeometry", "documentation":"

Contains the calculated route's path as a linestring geometry.

" }, - "StartPosition":{ - "shape":"Position", - "documentation":"

The starting position of the leg. Follows the format [longitude,latitude].

If the StartPosition isn't located on a road, it's snapped to a nearby road.

" - }, "Steps":{ "shape":"StepList", "documentation":"

Contains a list of steps, which represent subsections of a leg. Each step provides instructions for how to move to the next step in the leg such as the step's start position, end position, travel distance, travel duration, and geometry offset.

" @@ -3651,9 +3945,11 @@ "type":"structure", "required":["TrackerName"], "members":{ - "FilterGeometry":{ - "shape":"TrackingFilterGeometry", - "documentation":"

The geometry used to filter device positions.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The tracker resource containing the requested devices.

", + "location":"uri", + "locationName":"TrackerName" }, "MaxResults":{ "shape":"ListDevicePositionsRequestMaxResultsInteger", @@ -3663,11 +3959,9 @@ "shape":"Token", "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The tracker resource containing the requested devices.

", - "location":"uri", - "locationName":"TrackerName" + "FilterGeometry":{ + "shape":"TrackingFilterGeometry", + "documentation":"

The geometry used to filter device positions.

" } } }, @@ -3695,29 +3989,29 @@ "type":"structure", "required":[ "DeviceId", - "Position", - "SampleTime" + "SampleTime", + "Position" ], "members":{ - "Accuracy":{ - "shape":"PositionalAccuracy", - "documentation":"

The accuracy of the device position.

" - }, "DeviceId":{ "shape":"Id", "documentation":"

The ID of the device for this position.

" }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "Position":{ "shape":"Position", "documentation":"

The last known device position. Empty if no positions currently stored.

" }, + "Accuracy":{ + "shape":"PositionalAccuracy", + "documentation":"

The accuracy of the device position.

" + }, "PositionProperties":{ "shape":"PropertyMap", "documentation":"

The properties associated with the position.

" - }, - "SampleTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp at which the device position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } }, "documentation":"

Contains the tracker resource details.

" @@ -3763,8 +4057,8 @@ "type":"structure", "required":[ "CollectionName", - "CreateTime", "Description", + "CreateTime", "UpdateTime" ], "members":{ @@ -3772,10 +4066,6 @@ "shape":"ResourceName", "documentation":"

The name of the geofence collection.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, "Description":{ "shape":"ResourceDescription", "documentation":"

The description for the geofence collection

" @@ -3792,12 +4082,16 @@ "deprecated":true, "deprecatedMessage":"Deprecated. Unused." }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

Specifies a timestamp for when the resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" } }, - "documentation":"

Contains the geofence collection details.

" + "documentation":"

Contains the geofence collection details.

The returned geometry will always match the geometry format used when the geofence was created.

" }, "ListGeofenceCollectionsResponseEntryList":{ "type":"list", @@ -3806,25 +4100,17 @@ "ListGeofenceResponseEntry":{ "type":"structure", "required":[ - "CreateTime", "GeofenceId", "Geometry", "Status", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence identifier.

" }, - "GeofenceProperties":{ - "shape":"PropertyMap", - "documentation":"

User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" - }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

Contains the geofence geometry details describing a polygon or a circle.

" @@ -3833,12 +4119,20 @@ "shape":"String", "documentation":"

Identifies the state of the geofence. A geofence will hold one of the following states:

  • ACTIVE — The geofence has been indexed by the system.

  • PENDING — The geofence is being processed by the system.

  • FAILED — The geofence failed to be indexed by the system.

  • DELETED — The geofence has been deleted from the system index.

  • DELETING — The geofence is being deleted from the system index.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" } }, - "documentation":"

Contains a list of geofences stored in a given geofence collection.

" + "documentation":"

Contains a list of geofences stored in a given geofence collection.

The returned geometry will always match the geometry format used when the geofence was created.

" }, "ListGeofenceResponseEntryList":{ "type":"list", @@ -3854,13 +4148,13 @@ "location":"uri", "locationName":"CollectionName" }, + "NextToken":{ + "shape":"LargeToken", + "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" + }, "MaxResults":{ "shape":"ListGeofencesRequestMaxResultsInteger", "documentation":"

An optional limit for the number of geofences returned in a single call.

Default value: 100

" - }, - "NextToken":{ - "shape":"Token", - "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" } } }, @@ -3879,7 +4173,7 @@ "documentation":"

Contains a list of geofences stored in the geofence collection.

" }, "NextToken":{ - "shape":"Token", + "shape":"LargeToken", "documentation":"

A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

" } } @@ -3887,10 +4181,6 @@ "ListKeysRequest":{ "type":"structure", "members":{ - "Filter":{ - "shape":"ApiKeyFilter", - "documentation":"

Optionally filter the list to only Active or Expired API keys.

" - }, "MaxResults":{ "shape":"ListKeysRequestMaxResultsInteger", "documentation":"

An optional limit for the number of resources returned in a single call.

Default value: 100

" @@ -3898,6 +4188,10 @@ "NextToken":{ "shape":"Token", "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" + }, + "Filter":{ + "shape":"ApiKeyFilter", + "documentation":"

Optionally filter the list to only Active or Expired API keys.

" } } }, @@ -3924,30 +4218,30 @@ "ListKeysResponseEntry":{ "type":"structure", "required":[ - "CreateTime", - "ExpireTime", "KeyName", + "ExpireTime", "Restrictions", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ + "KeyName":{ + "shape":"ResourceName", + "documentation":"

The name of the API key resource.

" + }, + "ExpireTime":{ "shape":"Timestamp", - "documentation":"

The timestamp of when the API key was created, in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "documentation":"

The timestamp for when the API key resource will expire, in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "Description":{ "shape":"ResourceDescription", "documentation":"

The optional description for the API key resource.

" }, - "ExpireTime":{ + "Restrictions":{"shape":"ApiKeyRestrictions"}, + "CreateTime":{ "shape":"Timestamp", - "documentation":"

The timestamp for when the API key resource will expire, in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "KeyName":{ - "shape":"ResourceName", - "documentation":"

The name of the API key resource.

" + "documentation":"

The timestamp of when the API key was created, in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, - "Restrictions":{"shape":"ApiKeyRestrictions"}, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp of when the API key was last updated, in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -3995,28 +4289,24 @@ "ListMapsResponseEntry":{ "type":"structure", "required":[ - "CreateTime", - "DataSource", - "Description", "MapName", + "Description", + "DataSource", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "DataSource":{ - "shape":"String", - "documentation":"

Specifies the data provider for the associated map tiles.

" + "MapName":{ + "shape":"ResourceName", + "documentation":"

The name of the associated map resource.

" }, "Description":{ "shape":"ResourceDescription", "documentation":"

The description for the map resource.

" }, - "MapName":{ - "shape":"ResourceName", - "documentation":"

The name of the associated map resource.

" + "DataSource":{ + "shape":"String", + "documentation":"

Specifies the data provider for the associated map tiles.

" }, "PricingPlan":{ "shape":"PricingPlan", @@ -4024,6 +4314,10 @@ "deprecated":true, "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the map resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -4071,28 +4365,24 @@ "ListPlaceIndexesResponseEntry":{ "type":"structure", "required":[ - "CreateTime", - "DataSource", - "Description", "IndexName", + "Description", + "DataSource", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" - }, - "DataSource":{ - "shape":"String", - "documentation":"

The data provider of geospatial data. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the place index resource.

" }, "Description":{ "shape":"ResourceDescription", "documentation":"

The optional description for the place index resource.

" }, - "IndexName":{ - "shape":"ResourceName", - "documentation":"

The name of the place index resource.

" + "DataSource":{ + "shape":"String", + "documentation":"

The data provider of geospatial data. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" }, "PricingPlan":{ "shape":"PricingPlan", @@ -4100,6 +4390,10 @@ "deprecated":true, "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -4148,9 +4442,9 @@ "type":"structure", "required":[ "CalculatorName", - "CreateTime", - "DataSource", "Description", + "DataSource", + "CreateTime", "UpdateTime" ], "members":{ @@ -4158,24 +4452,24 @@ "shape":"ResourceName", "documentation":"

The name of the route calculator resource.

" }, - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp when the route calculator resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" + "Description":{ + "shape":"ResourceDescription", + "documentation":"

The optional description of the route calculator resource.

" }, "DataSource":{ "shape":"String", "documentation":"

The data provider of traffic and road network data. Indicates one of the available providers:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

The optional description of the route calculator resource.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

Always returns RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. Always returns RequestBasedUsage." }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the route calculator resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp when the route calculator resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

  • For example, 2020–07-2T12:15:20.000Z+01:00

" @@ -4212,6 +4506,12 @@ "type":"structure", "required":["TrackerName"], "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The tracker resource whose associated geofence collections you want to list.

", + "location":"uri", + "locationName":"TrackerName" + }, "MaxResults":{ "shape":"ListTrackerConsumersRequestMaxResultsInteger", "documentation":"

An optional limit for the number of resources returned in a single call.

Default value: 100

" @@ -4219,12 +4519,6 @@ "NextToken":{ "shape":"Token", "documentation":"

The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

Default value: null

" - }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The tracker resource whose associated geofence collections you want to list.

", - "location":"uri", - "locationName":"TrackerName" } } }, @@ -4284,15 +4578,15 @@ "ListTrackersResponseEntry":{ "type":"structure", "required":[ - "CreateTime", - "Description", "TrackerName", + "Description", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource.

" }, "Description":{ "shape":"ResourceDescription", @@ -4310,9 +4604,9 @@ "deprecated":true, "deprecatedMessage":"Deprecated. Unused." }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The name of the tracker resource.

" + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, "UpdateTime":{ "shape":"Timestamp", @@ -4325,21 +4619,151 @@ "type":"list", "member":{"shape":"ListTrackersResponseEntry"} }, + "LteCellDetails":{ + "type":"structure", + "required":[ + "CellId", + "Mcc", + "Mnc" + ], + "members":{ + "CellId":{ + "shape":"EutranCellId", + "documentation":"

The E-UTRAN Cell Identifier (ECI).

" + }, + "Mcc":{ + "shape":"LteCellDetailsMccInteger", + "documentation":"

The Mobile Country Code (MCC).

" + }, + "Mnc":{ + "shape":"LteCellDetailsMncInteger", + "documentation":"

The Mobile Network Code (MNC).

" + }, + "LocalId":{ + "shape":"LteLocalId", + "documentation":"

The LTE local identification information (local ID).

" + }, + "NetworkMeasurements":{ + "shape":"LteCellDetailsNetworkMeasurementsList", + "documentation":"

The network measurements.

" + }, + "TimingAdvance":{ + "shape":"LteCellDetailsTimingAdvanceInteger", + "documentation":"

Timing Advance (TA).

" + }, + "NrCapable":{ + "shape":"Boolean", + "documentation":"

Indicates whether the LTE object is capable of supporting NR (new radio).

" + }, + "Rsrp":{ + "shape":"Rsrp", + "documentation":"

Signal power of the reference signal received, measured in decibel-milliwatts (dBm).

" + }, + "Rsrq":{ + "shape":"Rsrq", + "documentation":"

Signal quality of the reference Signal received, measured in decibels (dB).

" + }, + "Tac":{ + "shape":"LteCellDetailsTacInteger", + "documentation":"

LTE Tracking Area Code (TAC).

" + } + }, + "documentation":"

Details about the Long-Term Evolution (LTE) network.

" + }, + "LteCellDetailsMccInteger":{ + "type":"integer", + "box":true, + "max":999, + "min":200 + }, + "LteCellDetailsMncInteger":{ + "type":"integer", + "box":true, + "max":999, + "min":0 + }, + "LteCellDetailsNetworkMeasurementsList":{ + "type":"list", + "member":{"shape":"LteNetworkMeasurements"}, + "max":32, + "min":1 + }, + "LteCellDetailsTacInteger":{ + "type":"integer", + "box":true, + "max":65535, + "min":0 + }, + "LteCellDetailsTimingAdvanceInteger":{ + "type":"integer", + "box":true, + "max":1282, + "min":0 + }, + "LteLocalId":{ + "type":"structure", + "required":[ + "Earfcn", + "Pci" + ], + "members":{ + "Earfcn":{ + "shape":"Earfcn", + "documentation":"

E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN).

" + }, + "Pci":{ + "shape":"Pci", + "documentation":"

Physical Cell ID (PCI).

" + } + }, + "documentation":"

LTE local identification information (local ID).

" + }, + "LteNetworkMeasurements":{ + "type":"structure", + "required":[ + "Earfcn", + "CellId", + "Pci" + ], + "members":{ + "Earfcn":{ + "shape":"Earfcn", + "documentation":"

E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN).

" + }, + "CellId":{ + "shape":"EutranCellId", + "documentation":"

E-UTRAN Cell Identifier (ECI).

" + }, + "Pci":{ + "shape":"Pci", + "documentation":"

Physical Cell ID (PCI).

" + }, + "Rsrp":{ + "shape":"Rsrp", + "documentation":"

Signal power of the reference signal received, measured in dBm (decibel-milliwatts).

" + }, + "Rsrq":{ + "shape":"Rsrq", + "documentation":"

Signal quality of the reference Signal received, measured in decibels (dB).

" + } + }, + "documentation":"

LTE network measurements.

" + }, "MapConfiguration":{ "type":"structure", "required":["Style"], "members":{ - "CustomLayers":{ - "shape":"CustomLayerList", - "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Currenlty only VectorEsriNavigation supports CustomLayers. For more information, see Custom Layers.

" + "Style":{ + "shape":"MapStyle", + "documentation":"

Specifies the map style selected from an available data provider.

Valid Esri map styles:

  • VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.

  • RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.

  • VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.

  • VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.

  • VectorEsriStreets – The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.

  • VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices.

Valid HERE Technologies map styles:

  • VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.

    The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it.

  • VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan.

  • VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics.

  • RasterHereExploreSatellite – A global map containing high resolution satellite imagery.

  • HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved.

    Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved.

Valid GrabMaps map styles:

  • VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia.

  • VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia.

Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

Valid Open Data map styles:

  • VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

  • VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

  • VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data.

  • VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data.

" }, "PoliticalView":{ "shape":"CountryCode3", "documentation":"

Specifies the political view for the style. Leave unset to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

Default is unset.

Not all map resources or styles support political view styles. See Political views for more information.

" }, - "Style":{ - "shape":"MapStyle", - "documentation":"

Specifies the map style selected from an available data provider.

Valid Esri map styles:

  • VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices. It also includes a richer set of places, such as shops, services, restaurants, attractions, and other points of interest. Enable the POI layer by setting it in CustomLayers to leverage the additional places data.

  • RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.

  • VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.

  • VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.

  • VectorEsriStreets – The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.

  • VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.

Valid HERE Technologies map styles:

  • VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan.

  • RasterHereExploreSatellite – A global map containing high resolution satellite imagery.

  • HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved.

    Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved.

  • VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.

    The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it.

  • VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics.

Valid GrabMaps map styles:

  • VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia.

  • VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia.

Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

Valid Open Data map styles:

  • VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

  • VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

  • VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data.

  • VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data.

" + "CustomLayers":{ + "shape":"CustomLayerList", + "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Not all map resources or styles support custom layers. See Custom Layers for more information.

" } }, "documentation":"

Specifies the map tile style selected from an available provider.

" @@ -4347,13 +4771,13 @@ "MapConfigurationUpdate":{ "type":"structure", "members":{ - "CustomLayers":{ - "shape":"CustomLayerList", - "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Currenlty only VectorEsriNavigation supports CustomLayers. For more information, see Custom Layers.

" - }, "PoliticalView":{ "shape":"CountryCode3OrEmpty", "documentation":"

Specifies the political view for the style. Set to an empty string to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

Not all map resources or styles support political view styles. See Political views for more information.

" + }, + "CustomLayers":{ + "shape":"CustomLayerList", + "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Not all map resources or styles support custom layers. See Custom Layers for more information.

" } }, "documentation":"

Specifies the political view for the style.

" @@ -4362,7 +4786,11 @@ "type":"string", "max":100, "min":1, - "pattern":"^[-._\\w]+$" + "pattern":"[-._\\w]+" + }, + "NearestDistance":{ + "type":"double", + "min":0 }, "OptimizationMode":{ "type":"string", @@ -4371,74 +4799,79 @@ "ShortestRoute" ] }, + "Pci":{ + "type":"integer", + "max":503, + "min":0 + }, "Place":{ "type":"structure", "required":["Geometry"], "members":{ + "Label":{ + "shape":"String", + "documentation":"

The full name and address of the point of interest such as a city, region, or country. For example, 123 Any Street, Any Town, USA.

" + }, + "Geometry":{"shape":"PlaceGeometry"}, "AddressNumber":{ "shape":"String", "documentation":"

The numerical portion of an address, such as a building number.

" }, - "Categories":{ - "shape":"PlaceCategoryList", - "documentation":"

The Amazon Location categories that describe this Place.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" - }, - "Country":{ + "Street":{ "shape":"String", - "documentation":"

A country/region specified using ISO 3166 3-digit country/region code. For example, CAN.

" - }, - "Geometry":{"shape":"PlaceGeometry"}, - "Interpolated":{ - "shape":"Boolean", - "documentation":"

True if the result is interpolated from other known places.

False if the Place is a known place.

Not returned when the partner does not provide the information.

For example, returns False for an address location that is found in the partner data, but returns True if an address does not exist in the partner data and its location is calculated by interpolating between other known addresses.

" + "documentation":"

The name for a street or a road to identify a location. For example, Main Street.

" }, - "Label":{ + "Neighborhood":{ "shape":"String", - "documentation":"

The full name and address of the point of interest such as a city, region, or country. For example, 123 Any Street, Any Town, USA.

" + "documentation":"

The name of a community district. For example, Downtown.

" }, "Municipality":{ "shape":"String", "documentation":"

A name for a local area, such as a city or town name. For example, Toronto.

" }, - "Neighborhood":{ - "shape":"String", - "documentation":"

The name of a community district. For example, Downtown.

" - }, - "PostalCode":{ + "SubRegion":{ "shape":"String", - "documentation":"

A group of numbers and letters in a country-specific format, which accompanies the address for the purpose of identifying a location.

" + "documentation":"

A county, or an area that's part of a larger region. For example, Metro Vancouver.

" }, "Region":{ "shape":"String", "documentation":"

A name for an area or geographical division, such as a province or state name. For example, British Columbia.

" }, - "Street":{ - "shape":"String", - "documentation":"

The name for a street or a road to identify a location. For example, Main Street.

" - }, - "SubMunicipality":{ + "Country":{ "shape":"String", - "documentation":"

An area that's part of a larger municipality. For example, Blissville is a submunicipality in the Queen County in New York.

This property is only returned for a place index that uses Esri as a data provider. The property is represented as a district.

For more information about data providers, see Amazon Location Service data providers.

" + "documentation":"

A country/region specified using ISO 3166 3-digit country/region code. For example, CAN.

" }, - "SubRegion":{ + "PostalCode":{ "shape":"String", - "documentation":"

A county, or an area that's part of a larger region. For example, Metro Vancouver.

" + "documentation":"

A group of numbers and letters in a country-specific format, which accompanies the address for the purpose of identifying a location.

" }, - "SupplementalCategories":{ - "shape":"PlaceSupplementalCategoryList", - "documentation":"

Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

" + "Interpolated":{ + "shape":"Boolean", + "documentation":"

True if the result is interpolated from other known places.

False if the Place is a known place.

Not returned when the partner does not provide the information.

For example, returns False for an address location that is found in the partner data, but returns True if an address does not exist in the partner data and its location is calculated by interpolating between other known addresses.

" }, "TimeZone":{ "shape":"TimeZone", "documentation":"

The time zone in which the Place is located. Returned only when using HERE or Grab as the selected partner.

" }, + "UnitType":{ + "shape":"String", + "documentation":"

For addresses with a UnitNumber, the type of unit. For example, Apartment.

Returned only for a place index that uses Esri as a data provider.

" + }, "UnitNumber":{ "shape":"String", - "documentation":"

For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

This property is returned only for a place index that uses Esri or Grab as a data provider. It is not returned for SearchPlaceIndexForPosition.

" + "documentation":"

For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

Returned only for a place index that uses Esri or Grab as a data provider. Is not returned for SearchPlaceIndexForPosition.

" }, - "UnitType":{ + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

The Amazon Location categories that describe this Place.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" + }, + "SupplementalCategories":{ + "shape":"PlaceSupplementalCategoryList", + "documentation":"

Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

" + }, + "SubMunicipality":{ "shape":"String", - "documentation":"

For addresses with a UnitNumber, the type of unit. For example, Apartment.

This property is returned only for a place index that uses Esri as a data provider.

" + "documentation":"

An area that's part of a larger municipality. For example, Blissville is a submunicipality in the Queen County in New York.

This property supported by Esri and OpenData. The Esri property is district, and the OpenData property is borough.

" } }, "documentation":"

Contains details about addresses or points of interest that match the search criteria.

Not all details are included with all responses. Some details may only be returned by specific data partners.

" @@ -4511,7 +4944,7 @@ "PositionalAccuracyHorizontalDouble":{ "type":"double", "box":true, - "max":10000, + "max":10000000, "min":0 }, "PricingPlan":{ @@ -4560,32 +4993,32 @@ "location":"uri", "locationName":"GeofenceId" }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

Contains the details to specify the position of the geofence. Can be a polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

The geofence polygon format supports a maximum of 1,000 vertices. The Geofence Geobuf format supports a maximum of 100,000 vertices.

" + }, "GeofenceProperties":{ "shape":"PropertyMap", "documentation":"

Associates one of more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" - }, - "Geometry":{ - "shape":"GeofenceGeometry", - "documentation":"

Contains the details to specify the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

Each geofence polygon can have a maximum of 1,000 vertices.

" } } }, "PutGeofenceResponse":{ "type":"structure", "required":[ - "CreateTime", "GeofenceId", + "CreateTime", "UpdateTime" ], "members":{ - "CreateTime":{ - "shape":"Timestamp", - "documentation":"

The timestamp for when the geofence was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" - }, "GeofenceId":{ "shape":"Id", "documentation":"

The geofence identifier entered in the request.

" }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the geofence was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" @@ -4596,7 +5029,7 @@ "type":"string", "max":253, "min":0, - "pattern":"^([$\\-._+!*\\x{60}(),;/?:@=&\\w]|%([0-9a-fA-F?]{2}|[0-9a-fA-F?]?[*]))+$" + "pattern":"([$\\-._+!*\\x{60}(),;/?:@=&\\w]|%([0-9a-fA-F?]{2}|[0-9a-fA-F?]?[*]))+" }, "ResourceDescription":{ "type":"string", @@ -4607,7 +5040,7 @@ "type":"string", "max":100, "min":1, - "pattern":"^[-._\\w]+$" + "pattern":"[-._\\w]+" }, "ResourceNotFoundException":{ "type":"structure", @@ -4687,21 +5120,33 @@ "type":"list", "member":{"shape":"RouteMatrixEntry"} }, + "Rsrp":{ + "type":"integer", + "box":true, + "max":-44, + "min":-140 + }, + "Rsrq":{ + "type":"float", + "box":true, + "max":-3, + "min":-19.5 + }, "SearchForPositionResult":{ "type":"structure", "required":[ - "Distance", - "Place" + "Place", + "Distance" ], "members":{ - "Distance":{ - "shape":"SearchForPositionResultDistanceDouble", - "documentation":"

The distance in meters of a great-circle arc between the query position and the result.

A great-circle arc is the shortest path on a sphere, in this case the Earth. This returns the shortest distance between two locations.

" - }, "Place":{ "shape":"Place", "documentation":"

Details about the search result, such as its address and position.

" }, + "Distance":{ + "shape":"SearchForPositionResultDistanceDouble", + "documentation":"

The distance in meters of a great-circle arc between the query position and the result.

A great-circle arc is the shortest path on a sphere, in this case the Earth. This returns the shortest distance between two locations.

" + }, "PlaceId":{ "shape":"PlaceId", "documentation":"

The unique identifier of the place. You can use this with the GetPlace operation to find the place again later.

For SearchPlaceIndexForPosition operations, the PlaceId is returned only by place indexes that use HERE or Grab as a data provider.

" @@ -4722,21 +5167,21 @@ "type":"structure", "required":["Text"], "members":{ - "Categories":{ - "shape":"PlaceCategoryList", - "documentation":"

The Amazon Location categories that describe the Place.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" + "Text":{ + "shape":"String", + "documentation":"

The text of the place suggestion, typically formatted as an address string.

" }, "PlaceId":{ "shape":"PlaceId", - "documentation":"

The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider:

  • Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. Place IDs might also change between the typical quarterly change but that will be much less frequent.

  • HERE: We recommend that you cache data for no longer than a week to keep your data data fresh. You can assume that less than 1% ID shifts will release over release which is approximately 1 - 2 times per week.

  • Grab: Place IDs can expire or become invalid in the following situations.

    • Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth, such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis.

    • Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, and it will be marked as derived in the place.result_type field in the response. The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by Place ID from Place Details API. After 30 days, the interpolated POIs(both Place ID and details) may expire and inaccessible from the Places Details API.

" + "documentation":"

The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

" + }, + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

The Amazon Location categories that describe the Place.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" }, "SupplementalCategories":{ "shape":"PlaceSupplementalCategoryList", "documentation":"

Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

" - }, - "Text":{ - "shape":"String", - "documentation":"

The text of the place suggestion, typically formatted as an address string.

" } }, "documentation":"

Contains a place suggestion resulting from a place suggestion query that is run on a place index resource.

" @@ -4747,23 +5192,23 @@ }, "SearchForTextResult":{ "type":"structure", - "required":["Place"], - "members":{ - "Distance":{ - "shape":"SearchForTextResultDistanceDouble", - "documentation":"

The distance in meters of a great-circle arc between the bias position specified and the result. Distance will be returned only if a bias position was specified in the query.

A great-circle arc is the shortest path on a sphere, in this case the Earth. This returns the shortest distance between two locations.

" - }, + "required":["Place"], + "members":{ "Place":{ "shape":"Place", "documentation":"

Details about the search result, such as its address and position.

" }, - "PlaceId":{ - "shape":"PlaceId", - "documentation":"

The unique identifier of the place. You can use this with the GetPlace operation to find the place again later.

For SearchPlaceIndexForText operations, the PlaceId is returned only by place indexes that use HERE or Grab as a data provider.

" + "Distance":{ + "shape":"SearchForTextResultDistanceDouble", + "documentation":"

The distance in meters of a great-circle arc between the bias position specified and the result. Distance will be returned only if a bias position was specified in the query.

A great-circle arc is the shortest path on a sphere, in this case the Earth. This returns the shortest distance between two locations.

" }, "Relevance":{ "shape":"SearchForTextResultRelevanceDouble", "documentation":"

The relative confidence in the match for a result among the results returned. For example, if more fields for an address match (including house number, street, city, country/region, and postal code), the relevance score is closer to 1.

Returned only when the partner selected is Esri or Grab.

" + }, + "PlaceId":{ + "shape":"PlaceId", + "documentation":"

The unique identifier of the place. You can use this with the GetPlace operation to find the place again later.

For SearchPlaceIndexForText operations, the PlaceId is returned only by place indexes that use HERE or Grab as a data provider.

" } }, "documentation":"

Contains a search result from a text search query that is run on a place index resource.

" @@ -4796,50 +5241,58 @@ "location":"uri", "locationName":"IndexName" }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" - }, - "Language":{ - "shape":"LanguageTag", - "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" + "Position":{ + "shape":"Position", + "documentation":"

Specifies the longitude and latitude of the position to query.

This parameter must contain a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents a position with longitude -123.1174 and latitude 49.2847.

" }, "MaxResults":{ "shape":"PlaceIndexSearchResultLimit", "documentation":"

An optional parameter. The maximum number of results returned per request.

Default value: 50

" }, - "Position":{ - "shape":"Position", - "documentation":"

Specifies the longitude and latitude of the position to query.

This parameter must contain a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents a position with longitude -123.1174 and latitude 49.2847.

" + "Language":{ + "shape":"LanguageTag", + "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" + }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, "SearchPlaceIndexForPositionResponse":{ "type":"structure", "required":[ - "Results", - "Summary" + "Summary", + "Results" ], "members":{ - "Results":{ - "shape":"SearchForPositionResultList", - "documentation":"

Returns a list of Places closest to the specified position. Each result contains additional information about the Places returned.

" - }, "Summary":{ "shape":"SearchPlaceIndexForPositionSummary", "documentation":"

Contains a summary of the request. Echoes the input values for Position, Language, MaxResults, and the DataSource of the place index.

" + }, + "Results":{ + "shape":"SearchForPositionResultList", + "documentation":"

Returns a list of Places closest to the specified position. Each result contains additional information about the Places returned.

" } } }, "SearchPlaceIndexForPositionSummary":{ "type":"structure", "required":[ - "DataSource", - "Position" + "Position", + "DataSource" ], "members":{ + "Position":{ + "shape":"Position", + "documentation":"

The position specified in the request.

" + }, + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

Contains the optional result count limit that is specified in the request.

Default value: 50

" + }, "DataSource":{ "shape":"String", "documentation":"

The geospatial data provider attached to the place index resource specified in the request. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" @@ -4847,14 +5300,6 @@ "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" - }, - "MaxResults":{ - "shape":"PlaceIndexSearchResultLimit", - "documentation":"

Contains the optional result count limit that is specified in the request.

Default value: 50

" - }, - "Position":{ - "shape":"Position", - "documentation":"

The position specified in the request.

" } }, "documentation":"

A summary of the request sent by using SearchPlaceIndexForPosition.

" @@ -4866,6 +5311,16 @@ "Text" ], "members":{ + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the place index resource you want to use for the search.

", + "location":"uri", + "locationName":"IndexName" + }, + "Text":{ + "shape":"SearchPlaceIndexForSuggestionsRequestTextString", + "documentation":"

The free-form partial text to use to generate place suggestions. For example, eiffel tow.

" + }, "BiasPosition":{ "shape":"Position", "documentation":"

An optional parameter that indicates a preference for place suggestions that are closer to a specified position.

If provided, this parameter must contain a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents the position with longitude -123.1174 and latitude 49.2847.

BiasPosition and FilterBBox are mutually exclusive. Specifying both options results in an error.

" @@ -4874,37 +5329,27 @@ "shape":"BoundingBox", "documentation":"

An optional parameter that limits the search results by returning only suggestions within a specified bounding box.

If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

" }, - "FilterCategories":{ - "shape":"FilterPlaceCategoryList", - "documentation":"

A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" - }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

An optional parameter that limits the search results by returning only suggestions within the provided list of countries.

  • Use the ISO 3166 3-letter country code. For example, Australia uses three upper-case characters: AUS.

" }, - "IndexName":{ - "shape":"ResourceName", - "documentation":"

The name of the place index resource you want to use for the search.

", - "location":"uri", - "locationName":"IndexName" - }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" + "MaxResults":{ + "shape":"SearchPlaceIndexForSuggestionsRequestMaxResultsInteger", + "documentation":"

An optional parameter. The maximum number of results returned per request.

The default: 5

" }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Gr to get suggestions with the language parameter set to en. The results found will most likely be returned as Athens, Greece.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα, Ελλάδα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" }, - "MaxResults":{ - "shape":"SearchPlaceIndexForSuggestionsRequestMaxResultsInteger", - "documentation":"

An optional parameter. The maximum number of results returned per request.

The default: 5

" + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" }, - "Text":{ - "shape":"SearchPlaceIndexForSuggestionsRequestTextString", - "documentation":"

The free-form partial text to use to generate place suggestions. For example, eiffel tow.

" + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, @@ -4923,58 +5368,58 @@ "SearchPlaceIndexForSuggestionsResponse":{ "type":"structure", "required":[ - "Results", - "Summary" + "Summary", + "Results" ], "members":{ - "Results":{ - "shape":"SearchForSuggestionsResultList", - "documentation":"

A list of place suggestions that best match the search text.

" - }, "Summary":{ "shape":"SearchPlaceIndexForSuggestionsSummary", "documentation":"

Contains a summary of the request. Echoes the input values for BiasPosition, FilterBBox, FilterCountries, Language, MaxResults, and Text. Also includes the DataSource of the place index.

" + }, + "Results":{ + "shape":"SearchForSuggestionsResultList", + "documentation":"

A list of place suggestions that best match the search text.

" } } }, "SearchPlaceIndexForSuggestionsSummary":{ "type":"structure", "required":[ - "DataSource", - "Text" + "Text", + "DataSource" ], "members":{ + "Text":{ + "shape":"SensitiveString", + "documentation":"

The free-form partial text input specified in the request.

" + }, "BiasPosition":{ "shape":"Position", "documentation":"

Contains the coordinates for the optional bias position specified in the request.

This parameter contains a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents the position with longitude -123.1174 and latitude 49.2847.

" }, - "DataSource":{ - "shape":"String", - "documentation":"

The geospatial data provider attached to the place index resource specified in the request. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" - }, "FilterBBox":{ "shape":"BoundingBox", "documentation":"

Contains the coordinates for the optional bounding box specified in the request.

" }, - "FilterCategories":{ - "shape":"FilterPlaceCategoryList", - "documentation":"

The optional category filter specified in the request.

" - }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

Contains the optional country filter specified in the request.

" }, - "Language":{ - "shape":"LanguageTag", - "documentation":"

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" - }, "MaxResults":{ "shape":"Integer", "documentation":"

Contains the optional result count limit specified in the request.

" }, - "Text":{ - "shape":"SensitiveString", - "documentation":"

The free-form partial text input specified in the request.

" + "DataSource":{ + "shape":"String", + "documentation":"

The geospatial data provider attached to the place index resource specified in the request. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" + }, + "Language":{ + "shape":"LanguageTag", + "documentation":"

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" + }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

The optional category filter specified in the request.

" } }, "documentation":"

A summary of the request sent by using SearchPlaceIndexForSuggestions.

" @@ -4986,6 +5431,16 @@ "Text" ], "members":{ + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the place index resource you want to use for the search.

", + "location":"uri", + "locationName":"IndexName" + }, + "Text":{ + "shape":"SearchPlaceIndexForTextRequestTextString", + "documentation":"

The address, name, city, or region to be used in the search in free-form text format. For example, 123 Any Street.

" + }, "BiasPosition":{ "shape":"Position", "documentation":"

An optional parameter that indicates a preference for places that are closer to a specified position.

If provided, this parameter must contain a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents the position with longitude -123.1174 and latitude 49.2847.

BiasPosition and FilterBBox are mutually exclusive. Specifying both options results in an error.

" @@ -4994,37 +5449,27 @@ "shape":"BoundingBox", "documentation":"

An optional parameter that limits the search results by returning only places that are within the provided bounding box.

If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

" }, - "FilterCategories":{ - "shape":"FilterPlaceCategoryList", - "documentation":"

A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" - }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

An optional parameter that limits the search results by returning only places that are in a specified list of countries.

  • Valid values include ISO 3166 3-letter country codes. For example, Australia uses three upper-case characters: AUS.

" }, - "IndexName":{ - "shape":"ResourceName", - "documentation":"

The name of the place index resource you want to use for the search.

", - "location":"uri", - "locationName":"IndexName" - }, - "Key":{ - "shape":"ApiKey", - "documentation":"

The optional API key to authorize the request.

", - "location":"querystring", - "locationName":"key" + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

An optional parameter. The maximum number of results returned per request.

The default: 50

" }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Greece, with the language parameter set to en. The result found will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" }, - "MaxResults":{ - "shape":"PlaceIndexSearchResultLimit", - "documentation":"

An optional parameter. The maximum number of results returned per request.

The default: 50

" + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

" }, - "Text":{ - "shape":"SearchPlaceIndexForTextRequestTextString", - "documentation":"

The address, name, city, or region to be used in the search in free-form text format. For example, 123 Any Street.

" + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" } } }, @@ -5037,51 +5482,43 @@ "SearchPlaceIndexForTextResponse":{ "type":"structure", "required":[ - "Results", - "Summary" + "Summary", + "Results" ], "members":{ - "Results":{ - "shape":"SearchForTextResultList", - "documentation":"

A list of Places matching the input text. Each result contains additional information about the specific point of interest.

Not all response properties are included with all responses. Some properties may only be returned by specific data partners.

" - }, "Summary":{ "shape":"SearchPlaceIndexForTextSummary", "documentation":"

Contains a summary of the request. Echoes the input values for BiasPosition, FilterBBox, FilterCountries, Language, MaxResults, and Text. Also includes the DataSource of the place index and the bounding box, ResultBBox, which surrounds the search results.

" + }, + "Results":{ + "shape":"SearchForTextResultList", + "documentation":"

A list of Places matching the input text. Each result contains additional information about the specific point of interest.

Not all response properties are included with all responses. Some properties may only be returned by specific data partners.

" } } }, "SearchPlaceIndexForTextSummary":{ "type":"structure", "required":[ - "DataSource", - "Text" + "Text", + "DataSource" ], "members":{ + "Text":{ + "shape":"SensitiveString", + "documentation":"

The search text specified in the request.

" + }, "BiasPosition":{ "shape":"Position", "documentation":"

Contains the coordinates for the optional bias position specified in the request.

This parameter contains a pair of numbers. The first number represents the X coordinate, or longitude; the second number represents the Y coordinate, or latitude.

For example, [-123.1174, 49.2847] represents the position with longitude -123.1174 and latitude 49.2847.

" }, - "DataSource":{ - "shape":"String", - "documentation":"

The geospatial data provider attached to the place index resource specified in the request. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" - }, "FilterBBox":{ "shape":"BoundingBox", "documentation":"

Contains the coordinates for the optional bounding box specified in the request.

" }, - "FilterCategories":{ - "shape":"FilterPlaceCategoryList", - "documentation":"

The optional category filter specified in the request.

" - }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

Contains the optional country filter specified in the request.

" }, - "Language":{ - "shape":"LanguageTag", - "documentation":"

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" - }, "MaxResults":{ "shape":"PlaceIndexSearchResultLimit", "documentation":"

Contains the optional result count limit specified in the request.

" @@ -5090,9 +5527,17 @@ "shape":"BoundingBox", "documentation":"

The bounding box that fully contains all search results.

If you specified the optional FilterBBox parameter in the request, ResultBBox is contained within FilterBBox.

" }, - "Text":{ - "shape":"SensitiveString", - "documentation":"

The search text specified in the request.

" + "DataSource":{ + "shape":"String", + "documentation":"

The geospatial data provider attached to the place index resource specified in the request. Values can be one of the following:

  • Esri

  • Grab

  • Here

For more information about data providers, see Amazon Location Service data providers.

" + }, + "Language":{ + "shape":"LanguageTag", + "documentation":"

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" + }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

The optional category filter specified in the request.

" } }, "documentation":"

A summary of the request sent by using SearchPlaceIndexForText.

" @@ -5118,6 +5563,13 @@ }, "exception":true }, + "SpeedUnit":{ + "type":"string", + "enum":[ + "KilometersPerHour", + "MilesPerHour" + ] + }, "Status":{ "type":"string", "enum":[ @@ -5128,31 +5580,31 @@ "Step":{ "type":"structure", "required":[ - "Distance", - "DurationSeconds", + "StartPosition", "EndPosition", - "StartPosition" + "Distance", + "DurationSeconds" ], "members":{ - "Distance":{ - "shape":"StepDistanceDouble", - "documentation":"

The travel distance between the step's StartPosition and EndPosition.

" - }, - "DurationSeconds":{ - "shape":"StepDurationSecondsDouble", - "documentation":"

The estimated travel time, in seconds, from the step's StartPosition to the EndPosition. The travel mode and departure time that you specify in the request determine the calculated time.

" + "StartPosition":{ + "shape":"Position", + "documentation":"

The starting position of a step. If the position is the first step in the leg, this position is the same as the start position of the leg.

" }, "EndPosition":{ "shape":"Position", "documentation":"

The end position of a step. If the position is the last step in the leg, this position is the same as the end position of the leg.

" }, + "Distance":{ + "shape":"StepDistanceDouble", + "documentation":"

The travel distance between the step's StartPosition and EndPosition.

" + }, + "DurationSeconds":{ + "shape":"StepDurationSecondsDouble", + "documentation":"

The estimated travel time, in seconds, from the step's StartPosition to the EndPosition. The travel mode and departure time that you specify in the request determine the calculated time.

" + }, "GeometryOffset":{ "shape":"StepGeometryOffsetInteger", "documentation":"

Represents the start position, or index, in a sequence of steps within the leg's line string geometry. For example, the index of the first step in a leg geometry is 0.

Included in the response for queries that set IncludeLegGeometry to True.

" - }, - "StartPosition":{ - "shape":"Position", - "documentation":"

The starting position of a step. If the position is the first step in the leg, this position is the same as the start position of the leg.

" } }, "documentation":"

Represents an element of a leg within a route. A step contains instructions for how to move to the next step in the leg.

" @@ -5181,7 +5633,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z+-=._:/]+$" + "pattern":"[a-zA-Z+-=._:/]+" }, "TagKeys":{ "type":"list", @@ -5224,7 +5676,7 @@ "type":"string", "max":256, "min":0, - "pattern":"^[A-Za-z0-9 _=@:.+-/]*$" + "pattern":"[A-Za-z0-9 _=@:.+-/]*" }, "ThrottlingException":{ "type":"structure", @@ -5290,21 +5742,21 @@ "TruckDimensions":{ "type":"structure", "members":{ - "Height":{ - "shape":"TruckDimensionsHeightDouble", - "documentation":"

The height of the truck.

  • For example, 4.5.

For routes calculated with a HERE resource, this value must be between 0 and 50 meters.

" - }, "Length":{ "shape":"TruckDimensionsLengthDouble", "documentation":"

The length of the truck.

  • For example, 15.5.

For routes calculated with a HERE resource, this value must be between 0 and 300 meters.

" }, - "Unit":{ - "shape":"DimensionUnit", - "documentation":"

Specifies the unit of measurement for the truck dimensions.

Default Value: Meters

" + "Height":{ + "shape":"TruckDimensionsHeightDouble", + "documentation":"

The height of the truck.

  • For example, 4.5.

For routes calculated with a HERE resource, this value must be between 0 and 50 meters.

" }, "Width":{ "shape":"TruckDimensionsWidthDouble", "documentation":"

The width of the truck.

  • For example, 4.5.

For routes calculated with a HERE resource, this value must be between 0 and 50 meters.

" + }, + "Unit":{ + "shape":"DimensionUnit", + "documentation":"

Specifies the unit of measurement for the truck dimensions.

Default Value: Meters

" } }, "documentation":"

Contains details about the truck dimensions in the unit of measurement that you specify. Used to filter out roads that can't support or allow the specified dimensions for requests that specify TravelMode as Truck.

" @@ -5379,10 +5831,6 @@ "location":"uri", "locationName":"CollectionName" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

Updates the description for the geofence collection.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", @@ -5394,25 +5842,29 @@ "documentation":"

This parameter is no longer used.

", "deprecated":true, "deprecatedMessage":"Deprecated. No longer allowed." + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the geofence collection.

" } } }, "UpdateGeofenceCollectionResponse":{ "type":"structure", "required":[ - "CollectionArn", "CollectionName", + "CollectionArn", "UpdateTime" ], "members":{ - "CollectionArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the updated geofence collection. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" - }, "CollectionName":{ "shape":"ResourceName", "documentation":"

The name of the updated geofence collection.

" }, + "CollectionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated geofence collection. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The time when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" @@ -5423,6 +5875,12 @@ "type":"structure", "required":["KeyName"], "members":{ + "KeyName":{ + "shape":"ResourceName", + "documentation":"

The name of the API key resource to update.

", + "location":"uri", + "locationName":"KeyName" + }, "Description":{ "shape":"ResourceDescription", "documentation":"

Updates the description for the API key resource.

" @@ -5431,20 +5889,14 @@ "shape":"Timestamp", "documentation":"

Updates the timestamp for when the API key resource will expire in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" }, - "ForceUpdate":{ - "shape":"Boolean", - "documentation":"

The boolean flag to be included for updating ExpireTime or Restrictions details.

Must be set to true to update an API key resource that has been used in the past 7 days.

False if force update is not preferred

Default value: False

" - }, - "KeyName":{ - "shape":"ResourceName", - "documentation":"

The name of the API key resource to update.

", - "location":"uri", - "locationName":"KeyName" - }, "NoExpiry":{ "shape":"Boolean", "documentation":"

Whether the API key should expire. Set to true to set the API key to have no expiration time.

" }, + "ForceUpdate":{ + "shape":"Boolean", + "documentation":"

The boolean flag to be included for updating ExpireTime or Restrictions details.

Must be set to true to update an API key resource that has been used in the past 7 days.

False if force update is not preferred

Default value: False

" + }, "Restrictions":{ "shape":"ApiKeyRestrictions", "documentation":"

Updates the API key restrictions for the API key resource.

" @@ -5477,14 +5929,6 @@ "type":"structure", "required":["MapName"], "members":{ - "ConfigurationUpdate":{ - "shape":"MapConfigurationUpdate", - "documentation":"

Updates the parts of the map configuration that can be updated, including the political view.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

Updates the description for the map resource.

" - }, "MapName":{ "shape":"ResourceName", "documentation":"

The name of the map resource to update.

", @@ -5496,25 +5940,33 @@ "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the map resource.

" + }, + "ConfigurationUpdate":{ + "shape":"MapConfigurationUpdate", + "documentation":"

Updates the parts of the map configuration that can be updated, including the political view.

" } } }, "UpdateMapResponse":{ "type":"structure", "required":[ - "MapArn", "MapName", + "MapArn", "UpdateTime" ], "members":{ - "MapArn":{ - "shape":"GeoArn", - "documentation":"

The Amazon Resource Name (ARN) of the updated map resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:map/ExampleMap

" - }, "MapName":{ "shape":"ResourceName", "documentation":"

The name of the updated map resource.

" }, + "MapArn":{ + "shape":"GeoArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated map resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:map/ExampleMap

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the map resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -5525,14 +5977,6 @@ "type":"structure", "required":["IndexName"], "members":{ - "DataSourceConfiguration":{ - "shape":"DataSourceConfiguration", - "documentation":"

Updates the data storage option for the place index resource.

" - }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

Updates the description for the place index resource.

" - }, "IndexName":{ "shape":"ResourceName", "documentation":"

The name of the place index resource to update.

", @@ -5544,25 +5988,33 @@ "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the place index resource.

" + }, + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

Updates the data storage option for the place index resource.

" } } }, "UpdatePlaceIndexResponse":{ "type":"structure", "required":[ - "IndexArn", "IndexName", + "IndexArn", "UpdateTime" ], "members":{ - "IndexArn":{ - "shape":"GeoArn", - "documentation":"

The Amazon Resource Name (ARN) of the updated place index resource. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" - }, "IndexName":{ "shape":"ResourceName", "documentation":"

The name of the updated place index resource.

" }, + "IndexArn":{ + "shape":"GeoArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated place index resource. Used to specify a resource across Amazon Web Services.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -5579,34 +6031,34 @@ "location":"uri", "locationName":"CalculatorName" }, - "Description":{ - "shape":"ResourceDescription", - "documentation":"

Updates the description for the route calculator resource.

" - }, "PricingPlan":{ "shape":"PricingPlan", "documentation":"

No longer used. If included, the only allowed value is RequestBasedUsage.

", "deprecated":true, "deprecatedMessage":"Deprecated. If included, the only allowed value is RequestBasedUsage." + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the route calculator resource.

" } } }, "UpdateRouteCalculatorResponse":{ "type":"structure", "required":[ - "CalculatorArn", "CalculatorName", + "CalculatorArn", "UpdateTime" ], "members":{ - "CalculatorArn":{ - "shape":"GeoArn", - "documentation":"

The Amazon Resource Name (ARN) of the updated route calculator resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

" - }, "CalculatorName":{ "shape":"ResourceName", "documentation":"

The name of the updated route calculator resource.

" }, + "CalculatorArn":{ + "shape":"GeoArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated route calculator resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:route- calculator/ExampleCalculator

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the route calculator was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" @@ -5617,21 +6069,11 @@ "type":"structure", "required":["TrackerName"], "members":{ - "Description":{ - "shape":"ResourceDescription", - "documentation":"

Updates the description for the tracker resource.

" - }, - "EventBridgeEnabled":{ - "shape":"Boolean", - "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" - }, - "KmsKeyEnableGeospatialQueries":{ - "shape":"Boolean", - "documentation":"

Enables GeospatialQueries for a tracker that uses an Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

" - }, - "PositionFiltering":{ - "shape":"PositionFiltering", - "documentation":"

Updates the position filtering for the tracker resource.

Valid values:

  • TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 seconds, only one update per 30 seconds is stored for each unique device ID.

  • DistanceBased - If the device has moved less than 30 m (98.4 ft), location updates are ignored. Location updates within this distance are neither evaluated against linked geofence collections, nor stored. This helps control costs by reducing the number of geofence evaluations and historical device positions to paginate through. Distance-based filtering can also reduce the effects of GPS noise when displaying device trajectories on a map.

  • AccuracyBased - If the device has moved less than the measured accuracy, location updates are ignored. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is ignored if the device has moved less than 15 m. Ignored location updates are neither evaluated against linked geofence collections, nor stored. This helps reduce the effects of GPS noise when displaying device trajectories on a map, and can help control costs by reducing the number of geofence evaluations.

" + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource to update.

", + "location":"uri", + "locationName":"TrackerName" }, "PricingPlan":{ "shape":"PricingPlan", @@ -5645,49 +6087,58 @@ "deprecated":true, "deprecatedMessage":"Deprecated. No longer allowed." }, - "TrackerName":{ - "shape":"ResourceName", - "documentation":"

The name of the tracker resource to update.

", - "location":"uri", - "locationName":"TrackerName" + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the tracker resource.

" + }, + "PositionFiltering":{ + "shape":"PositionFiltering", + "documentation":"

Updates the position filtering for the tracker resource.

Valid values:

  • TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 seconds, only one update per 30 seconds is stored for each unique device ID.

  • DistanceBased - If the device has moved less than 30 m (98.4 ft), location updates are ignored. Location updates within this distance are neither evaluated against linked geofence collections, nor stored. This helps control costs by reducing the number of geofence evaluations and historical device positions to paginate through. Distance-based filtering can also reduce the effects of GPS noise when displaying device trajectories on a map.

  • AccuracyBased - If the device has moved less than the measured accuracy, location updates are ignored. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is ignored if the device has moved less than 15 m. Ignored location updates are neither evaluated against linked geofence collections, nor stored. This helps reduce the effects of GPS noise when displaying device trajectories on a map, and can help control costs by reducing the number of geofence evaluations.

" + }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" + }, + "KmsKeyEnableGeospatialQueries":{ + "shape":"Boolean", + "documentation":"

Enables GeospatialQueries for a tracker that uses an Amazon Web Services KMS customer managed key.

This parameter is only used if you are using a KMS customer managed key.

" } } }, "UpdateTrackerResponse":{ "type":"structure", "required":[ - "TrackerArn", "TrackerName", + "TrackerArn", "UpdateTime" ], "members":{ - "TrackerArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the updated tracker resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" - }, "TrackerName":{ "shape":"ResourceName", "documentation":"

The name of the updated tracker resource.

" }, + "TrackerArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated tracker resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" + }, "UpdateTime":{ "shape":"Timestamp", "documentation":"

The timestamp for when the tracker resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" } } }, + "Uuid":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, "ValidationException":{ "type":"structure", "required":[ - "FieldList", "Message", - "Reason" + "Reason", + "FieldList" ], "members":{ - "FieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

The field where the invalid entry was detected.

", - "locationName":"fieldList" - }, "Message":{ "shape":"String", "locationName":"message" @@ -5696,6 +6147,11 @@ "shape":"ValidationExceptionReason", "documentation":"

A message with the reason for the validation exception error.

", "locationName":"reason" + }, + "FieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The field where the invalid entry was detected.

", + "locationName":"fieldList" } }, "documentation":"

The input failed to meet the constraints specified by the AWS service.

", @@ -5708,19 +6164,19 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "Message", - "Name" + "Name", + "Message" ], "members":{ - "Message":{ - "shape":"String", - "documentation":"

A message with the reason for the validation exception error.

", - "locationName":"message" - }, "Name":{ "shape":"String", "documentation":"

The field name where the invalid entry was detected.

", "locationName":"name" + }, + "Message":{ + "shape":"String", + "documentation":"

A message with the reason for the validation exception error.

", + "locationName":"message" } }, "documentation":"

The input failed to meet the constraints specified by the AWS service in a specified field.

" @@ -5745,6 +6201,95 @@ "Kilograms", "Pounds" ] + }, + "VerifyDevicePositionRequest":{ + "type":"structure", + "required":[ + "TrackerName", + "DeviceState" + ], + "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource to be associated with verification request.

", + "location":"uri", + "locationName":"TrackerName" + }, + "DeviceState":{ + "shape":"DeviceState", + "documentation":"

The device's state, including position, IP address, cell signals and Wi-Fi access points.

" + }, + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The distance unit for the verification request.

Default Value: Kilometers

" + } + } + }, + "VerifyDevicePositionResponse":{ + "type":"structure", + "required":[ + "InferredState", + "DeviceId", + "SampleTime", + "ReceivedTime", + "DistanceUnit" + ], + "members":{ + "InferredState":{ + "shape":"InferredState", + "documentation":"

The inferred state of the device, given the provided position, IP address, cellular signals, and Wi-Fi access points.

" + }, + "DeviceId":{ + "shape":"Id", + "documentation":"

The device identifier.

" + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "ReceivedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + }, + "DistanceUnit":{ + "shape":"DistanceUnit", + "documentation":"

The distance unit for the verification response.

" + } + } + }, + "WiFiAccessPoint":{ + "type":"structure", + "required":[ + "MacAddress", + "Rss" + ], + "members":{ + "MacAddress":{ + "shape":"WiFiAccessPointMacAddressString", + "documentation":"

Medium access control address (MAC).

" + }, + "Rss":{ + "shape":"WiFiAccessPointRssInteger", + "documentation":"

Received signal strength (dBm) of the WLAN measurement data.

" + } + }, + "documentation":"

Wi-Fi access point.

" + }, + "WiFiAccessPointList":{ + "type":"list", + "member":{"shape":"WiFiAccessPoint"} + }, + "WiFiAccessPointMacAddressString":{ + "type":"string", + "max":17, + "min":12, + "pattern":"([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})" + }, + "WiFiAccessPointRssInteger":{ + "type":"integer", + "box":true, + "max":0, + "min":-128 } }, "documentation":"

\"Suite of geospatial services including Maps, Places, Routes, Tracking, and Geofencing\"

" diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 0191b1d28d..ad965a676c 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"logs", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon CloudWatch Logs", "serviceId":"CloudWatch Logs", "signatureVersion":"v4", "targetPrefix":"Logs_20140328", - "uid":"logs-2014-03-28" + "uid":"logs-2014-03-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateKmsKey":{ diff --git a/botocore/data/macie2/2020-01-01/paginators-1.json b/botocore/data/macie2/2020-01-01/paginators-1.json index 56d8f702f8..8037ac751e 100644 --- a/botocore/data/macie2/2020-01-01/paginators-1.json +++ b/botocore/data/macie2/2020-01-01/paginators-1.json @@ -95,6 +95,12 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "artifacts" + }, + "ListAutomatedDiscoveryAccounts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" } } } diff --git a/botocore/data/macie2/2020-01-01/service-2.json b/botocore/data/macie2/2020-01-01/service-2.json index 88e01cd1c7..6678e53a65 100644 --- a/botocore/data/macie2/2020-01-01/service-2.json +++ b/botocore/data/macie2/2020-01-01/service-2.json @@ -8,7 +8,10 @@ "protocol": "rest-json", "jsonVersion": "1.1", "uid": "macie2-2020-01-01", - "signatureVersion": "v4" + "signatureVersion": "v4", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AcceptInvitation": { @@ -103,6 +106,44 @@ ], "documentation": "

Retrieves information about one or more custom data identifiers.

" }, + "BatchUpdateAutomatedDiscoveryAccounts": { + "name": "BatchUpdateAutomatedDiscoveryAccounts", + "http": { + "method": "PATCH", + "requestUri": "/automated-discovery/accounts", + "responseCode": 200 + }, + "input": { + "shape": "BatchUpdateAutomatedDiscoveryAccountsRequest" + }, + "output": { + "shape": "BatchUpdateAutomatedDiscoveryAccountsResponse", + "documentation": "

The request succeeded. However, the update might have failed for one or more accounts.

" + }, + "errors": [ + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ValidationException", + "documentation": "

The request failed because the input doesn't satisfy the constraints specified by the service.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + } + ], + "documentation": "

Changes the status of automated sensitive data discovery for one or more accounts.

" + }, "CreateAllowList": { "name": "CreateAllowList", "http": { @@ -1266,7 +1307,7 @@ "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" } ], - "documentation": "

Retrieves the configuration settings and status of automated sensitive data discovery for an account.

" + "documentation": "

Retrieves the configuration settings and status of automated sensitive data discovery for an organization or standalone account.

" }, "GetBucketStatistics": { "name": "GetBucketStatistics", @@ -2128,6 +2169,44 @@ ], "documentation": "

Retrieves a subset of information about all the allow lists for an account.

" }, + "ListAutomatedDiscoveryAccounts": { + "name": "ListAutomatedDiscoveryAccounts", + "http": { + "method": "GET", + "requestUri": "/automated-discovery/accounts", + "responseCode": 200 + }, + "input": { + "shape": "ListAutomatedDiscoveryAccountsRequest" + }, + "output": { + "shape": "ListAutomatedDiscoveryAccountsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ValidationException", + "documentation": "

The request failed because the input doesn't satisfy the constraints specified by the service.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + } + ], + "documentation": "

Retrieves the status of automated sensitive data discovery for one or more accounts.

" + }, "ListClassificationJobs": { "name": "ListClassificationJobs", "http": { @@ -2390,7 +2469,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the Amazon Macie membership invitations that were received by an account.

" + "documentation": "

Retrieves information about Amazon Macie membership invitations that were received by an account.

" }, "ListManagedDataIdentifiers": { "name": "ListManagedDataIdentifiers", @@ -2406,7 +2485,9 @@ "shape": "ListManagedDataIdentifiersResponse", "documentation": "

The request succeeded.

" }, - "errors": [], + "errors": [ + + ], "documentation": "

Retrieves information about all the managed data identifiers that Amazon Macie currently provides.

" }, "ListMembers": { @@ -2537,7 +2618,7 @@ "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" } ], - "documentation": "

Retrieves information about objects that were selected from an S3 bucket for automated sensitive data discovery.

" + "documentation": "

Retrieves information about objects that Amazon Macie selected from an S3 bucket for automated sensitive data discovery.

" }, "ListResourceProfileDetections": { "name": "ListResourceProfileDetections", @@ -2633,7 +2714,9 @@ "shape": "ListTagsForResourceResponse", "documentation": "

The request succeeded.

" }, - "errors": [], + "errors": [ + + ], "documentation": "

Retrieves the tags (keys and values) that are associated with an Amazon Macie resource.

" }, "PutClassificationExportConfiguration": { @@ -2680,7 +2763,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Creates or updates the configuration settings for storing data classification results.

" + "documentation": "

Adds or updates the configuration settings for storing data classification results.

" }, "PutFindingsPublicationConfiguration": { "name": "PutFindingsPublicationConfiguration", @@ -2788,7 +2871,9 @@ "shape": "TagResourceResponse", "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" }, - "errors": [], + "errors": [ + + ], "documentation": "

Adds or updates one or more tags (keys and values) that are associated with an Amazon Macie resource.

" }, "TestCustomDataIdentifier": { @@ -2835,7 +2920,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Tests a custom data identifier.

" + "documentation": "

Tests criteria for a custom data identifier.

" }, "UntagResource": { "name": "UntagResource", @@ -2851,7 +2936,9 @@ "shape": "UntagResourceResponse", "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" }, - "errors": [], + "errors": [ + + ], "documentation": "

Removes one or more tags (keys and values) from an Amazon Macie resource.

" }, "UpdateAllowList": { @@ -2904,7 +2991,7 @@ }, "output": { "shape": "UpdateAutomatedDiscoveryConfigurationResponse", - "documentation": "

The request succeeded. The status of the automated sensitive data discovery configuration for the account was updated and there isn't any content to include in the body of the response (No Content).

" + "documentation": "

The request succeeded. The status was updated and there isn't any content to include in the body of the response (No Content).

" }, "errors": [ { @@ -2924,7 +3011,7 @@ "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" } ], - "documentation": "

Enables or disables automated sensitive data discovery for an account.

" + "documentation": "

Changes the configuration settings and status of automated sensitive data discovery for an organization or standalone account.

" }, "UpdateClassificationJob": { "name": "UpdateClassificationJob", @@ -3377,7 +3464,8 @@ }, "AcceptInvitationResponse": { "type": "structure", - "members": {} + "members": { + } }, "AccessControlList": { "type": "structure", @@ -3615,9 +3703,90 @@ }, "documentation": "

Provides information about an identity that performed an action on an affected resource by using temporary security credentials. The credentials were obtained using the AssumeRole operation of the Security Token Service (STS) API.

" }, + "AutoEnableMode": { + "type": "string", + "documentation": "

Specifies whether to automatically enable automated sensitive data discovery for accounts that are part of an organization in Amazon Macie. Valid values are:

", + "enum": [ + "ALL", + "NEW", + "NONE" + ] + }, + "AutomatedDiscoveryAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The Amazon Web Services account ID for the account.

" + }, + "status": { + "shape": "AutomatedDiscoveryAccountStatus", + "locationName": "status", + "documentation": "

The current status of automated sensitive data discovery for the account. Possible values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

" + } + }, + "documentation": "

Provides information about the status of automated sensitive data discovery for an Amazon Macie account.

" + }, + "AutomatedDiscoveryAccountStatus": { + "type": "string", + "documentation": "

The status of automated sensitive data discovery for an Amazon Macie account. Valid values are:

", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, + "AutomatedDiscoveryAccountUpdate": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The Amazon Web Services account ID for the account.

" + }, + "status": { + "shape": "AutomatedDiscoveryAccountStatus", + "locationName": "status", + "documentation": "

The new status of automated sensitive data discovery for the account. Valid values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

" + } + }, + "documentation": "

Changes the status of automated sensitive data discovery for an Amazon Macie account.

" + }, + "AutomatedDiscoveryAccountUpdateError": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The Amazon Web Services account ID for the account that the request applied to.

" + }, + "errorCode": { + "shape": "AutomatedDiscoveryAccountUpdateErrorCode", + "locationName": "errorCode", + "documentation": "

The error code for the error that caused the request to fail for the account (accountId). Possible values are: ACCOUNT_NOT_FOUND, the account doesn’t exist or you're not the Amazon Macie administrator for the account; and, ACCOUNT_PAUSED, Macie isn’t enabled for the account in the current Amazon Web Services Region.

" + } + }, + "documentation": "

Provides information about a request that failed to change the status of automated sensitive data discovery for an Amazon Macie account.

" + }, + "AutomatedDiscoveryAccountUpdateErrorCode": { + "type": "string", + "documentation": "

The error code that indicates why a request failed to change the status of automated sensitive data discovery for an Amazon Macie account. Possible values are:

", + "enum": [ + "ACCOUNT_PAUSED", + "ACCOUNT_NOT_FOUND" + ] + }, + "AutomatedDiscoveryMonitoringStatus": { + "type": "string", + "documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in an S3 bucket. Possible values are:

", + "enum": [ + "MONITORED", + "NOT_MONITORED" + ] + }, "AutomatedDiscoveryStatus": { "type": "string", - "documentation": "

The status of the automated sensitive data discovery configuration for an Amazon Macie account. Valid values are:

", + "documentation": "

The status of the automated sensitive data discovery configuration for an organization in Amazon Macie or a standalone Macie account. Valid values are:

", "enum": [ "ENABLED", "DISABLED" @@ -3719,6 +3888,26 @@ } } }, + "BatchUpdateAutomatedDiscoveryAccountsRequest": { + "type": "structure", + "members": { + "accounts": { + "shape": "__listOfAutomatedDiscoveryAccountUpdate", + "locationName": "accounts", + "documentation": "

An array of objects, one for each account to change the status of automated sensitive data discovery for. Each object specifies the Amazon Web Services account ID for an account and a new status for that account.

" + } + } + }, + "BatchUpdateAutomatedDiscoveryAccountsResponse": { + "type": "structure", + "members": { + "errors": { + "shape": "__listOfAutomatedDiscoveryAccountUpdateError", + "locationName": "errors", + "documentation": "

An array of objects, one for each account whose status wasn’t changed. Each object identifies the account and explains why the status of automated sensitive data discovery wasn’t changed for the account. This value is null if the request succeeded for all specified accounts.

" + } + } + }, "BlockPublicAccess": { "type": "structure", "members": { @@ -3929,6 +4118,11 @@ "locationName": "allowsUnencryptedObjectUploads", "documentation": "

Specifies whether the bucket policy for the bucket requires server-side encryption of objects when objects are added to the bucket. Possible values are:

  • FALSE - The bucket policy requires server-side encryption of new objects. PutObject requests must include a valid server-side encryption header.

  • TRUE - The bucket doesn't have a bucket policy or it has a bucket policy that doesn't require server-side encryption of new objects. If a bucket policy exists, it doesn't require PutObject requests to include a valid server-side encryption header.

  • UNKNOWN - Amazon Macie can't determine whether the bucket policy requires server-side encryption of new objects.

Valid server-side encryption headers are: x-amz-server-side-encryption with a value of AES256 or aws:kms, and x-amz-server-side-encryption-customer-algorithm with a value of AES256.

" }, + "automatedDiscoveryMonitoringStatus": { + "shape": "AutomatedDiscoveryMonitoringStatus", + "locationName": "automatedDiscoveryMonitoringStatus", + "documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED.

" + }, "bucketArn": { "shape": "__string", "locationName": "bucketArn", @@ -3967,12 +4161,12 @@ "jobDetails": { "shape": "JobDetails", "locationName": "jobDetails", - "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in the bucket, and, if so, the details of the job that ran most recently.

" + "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently.

" }, "lastAutomatedDiscoveryTime": { "shape": "__timestampIso8601", "locationName": "lastAutomatedDiscoveryTime", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

" }, "lastUpdated": { "shape": "__timestampIso8601", @@ -4007,7 +4201,7 @@ "sensitivityScore": { "shape": "__integer", "locationName": "sensitivityScore", - "documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account.

" + "documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

" }, "serverSideEncryption": { "shape": "BucketServerSideEncryption", @@ -4054,7 +4248,7 @@ }, "BucketMetadataErrorCode": { "type": "string", - "documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing metadata from Amazon S3 for an S3 bucket and the bucket's objects.

", + "documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

", "enum": [ "ACCESS_DENIED" ] @@ -4238,7 +4432,7 @@ "documentation": "

The S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

" } }, - "documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. The location must be an S3 bucket.

" + "documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. The location must be an S3 general purpose bucket.

" }, "ClassificationResult": { "type": "structure", @@ -4425,7 +4619,7 @@ "jobType": { "shape": "JobType", "locationName": "jobType", - "documentation": "

The schedule for running the job. Valid values are:

  • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

  • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to define the recurrence pattern for the job.

" + "documentation": "

The schedule for running the job. Valid values are:

  • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

  • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to specify the recurrence pattern for the job.

" }, "managedDataIdentifierIds": { "shape": "__listOf__string", @@ -4435,7 +4629,7 @@ "managedDataIdentifierSelector": { "shape": "ManagedDataIdentifierSelector", "locationName": "managedDataIdentifierSelector", - "documentation": "

The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:

  • ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property.

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

If you don't specify a value for this property, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts.

For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.

" + "documentation": "

The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:

  • ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property.

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

If you don't specify a value for this property, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts.

To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide.

" }, "name": { "shape": "__string", @@ -4684,7 +4878,8 @@ }, "CreateSampleFindingsResponse": { "type": "structure", - "members": {} + "members": { + } }, "CriteriaBlockForJob": { "type": "structure", @@ -4853,7 +5048,8 @@ }, "DailySchedule": { "type": "structure", - "members": {}, + "members": { + }, "documentation": "

Specifies that a classification job runs once a day, every day. This is an empty object.

" }, "DataIdentifierSeverity": { @@ -4958,7 +5154,8 @@ }, "DeleteAllowListResponse": { "type": "structure", - "members": {} + "members": { + } }, "DeleteCustomDataIdentifierRequest": { "type": "structure", @@ -4976,7 +5173,8 @@ }, "DeleteCustomDataIdentifierResponse": { "type": "structure", - "members": {} + "members": { + } }, "DeleteFindingsFilterRequest": { "type": "structure", @@ -4994,7 +5192,8 @@ }, "DeleteFindingsFilterResponse": { "type": "structure", - "members": {} + "members": { + } }, "DeleteInvitationsRequest": { "type": "structure", @@ -5035,7 +5234,8 @@ }, "DeleteMemberResponse": { "type": "structure", - "members": {} + "members": { + } }, "DescribeBucketsRequest": { "type": "structure", @@ -5097,7 +5297,7 @@ "allowListIds": { "shape": "__listOf__string", "locationName": "allowListIds", - "documentation": "

An array of unique identifiers, one for each allow list that the job uses when it analyzes data.

" + "documentation": "

An array of unique identifiers, one for each allow list that the job is configured to use when it analyzes data.

" }, "clientToken": { "shape": "__string", @@ -5113,7 +5313,7 @@ "customDataIdentifierIds": { "shape": "__listOf__string", "locationName": "customDataIdentifierIds", - "documentation": "

An array of unique identifiers, one for each custom data identifier that the job uses when it analyzes data. This value is null if the job uses only managed data identifiers to analyze data.

" + "documentation": "

An array of unique identifiers, one for each custom data identifier that the job is configured to use when it analyzes data. This value is null if the job is configured to use only managed data identifiers to analyze data.

" }, "description": { "shape": "__string", @@ -5163,7 +5363,7 @@ "managedDataIdentifierSelector": { "shape": "ManagedDataIdentifierSelector", "locationName": "managedDataIdentifierSelector", - "documentation": "

The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:

  • ALL - Use all managed data identifiers.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers.

If this value is null, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.

For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.

" + "documentation": "

The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:

  • ALL - Use all managed data identifiers.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers.

If this value is null, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.

To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide.

" }, "name": { "shape": "__string", @@ -5193,7 +5393,7 @@ "tags": { "shape": "TagMap", "locationName": "tags", - "documentation": "

A map of key-value pairs that specifies which tags (keys and values) are associated with the classification job.

" + "documentation": "

A map of key-value pairs that specifies which tags (keys and values) are associated with the job.

" }, "userPausedDetails": { "shape": "UserPausedDetails", @@ -5204,7 +5404,8 @@ }, "DescribeOrganizationConfigurationRequest": { "type": "structure", - "members": {} + "members": { + } }, "DescribeOrganizationConfigurationResponse": { "type": "structure", @@ -5269,15 +5470,17 @@ "documentation": "

The type of data identifier that detected the sensitive data. Possible values are: CUSTOM, for a custom data identifier; and, MANAGED, for a managed data identifier.

" } }, - "documentation": "

Provides information about a type of sensitive data that Amazon Macie found in an S3 bucket while performing automated sensitive data discovery for the bucket. The information also specifies the custom data identifier or managed data identifier that detected the data. This information is available only if automated sensitive data discovery is currently enabled for your account.

" + "documentation": "

Provides information about a type of sensitive data that Amazon Macie found in an S3 bucket while performing automated sensitive data discovery for an account. The information also specifies the custom or managed data identifier that detected the data. This information is available only if automated sensitive data discovery has been enabled for the account.

" }, "DisableMacieRequest": { "type": "structure", - "members": {} + "members": { + } }, "DisableMacieResponse": { "type": "structure", - "members": {} + "members": { + } }, "DisableOrganizationAdminAccountRequest": { "type": "structure", @@ -5295,23 +5498,28 @@ }, "DisableOrganizationAdminAccountResponse": { "type": "structure", - "members": {} + "members": { + } }, "DisassociateFromAdministratorAccountRequest": { "type": "structure", - "members": {} + "members": { + } }, "DisassociateFromAdministratorAccountResponse": { "type": "structure", - "members": {} + "members": { + } }, "DisassociateFromMasterAccountRequest": { "type": "structure", - "members": {} + "members": { + } }, "DisassociateFromMasterAccountResponse": { "type": "structure", - "members": {} + "members": { + } }, "DisassociateMemberRequest": { "type": "structure", @@ -5329,7 +5537,8 @@ }, "DisassociateMemberResponse": { "type": "structure", - "members": {} + "members": { + } }, "DomainDetails": { "type": "structure", @@ -5352,7 +5561,8 @@ }, "Empty": { "type": "structure", - "members": {}, + "members": { + }, "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" }, "EnableMacieRequest": { @@ -5378,7 +5588,8 @@ }, "EnableMacieResponse": { "type": "structure", - "members": {} + "members": { + } }, "EnableOrganizationAdminAccountRequest": { "type": "structure", @@ -5401,7 +5612,8 @@ }, "EnableOrganizationAdminAccountResponse": { "type": "structure", - "members": {} + "members": { + } }, "EncryptionType": { "type": "string", @@ -5583,12 +5795,12 @@ "ipAddressDetails": { "shape": "IpAddressDetails", "locationName": "ipAddressDetails", - "documentation": "

The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographic location for the IP address.

" + "documentation": "

The IP address and related details about the device that the entity used to perform the action on the affected resource. The details can include information such as the owner and geographic location of the IP address.

" }, "userIdentity": { "shape": "UserIdentity", "locationName": "userIdentity", - "documentation": "

The type and other characteristics of the entity that performed the action on the affected resource.

" + "documentation": "

The type and other characteristics of the entity that performed the action on the affected resource. This value is null if the action was performed by an anonymous (unauthenticated) entity.

" } }, "documentation": "

Provides information about an entity that performed an action that produced a policy finding for a resource.

" @@ -5703,7 +5915,8 @@ }, "GetAdministratorAccountRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetAdministratorAccountResponse": { "type": "structure", @@ -5781,40 +5994,46 @@ }, "GetAutomatedDiscoveryConfigurationRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetAutomatedDiscoveryConfigurationResponse": { "type": "structure", "members": { + "autoEnableOrganizationMembers": { + "shape": "AutoEnableMode", + "locationName": "autoEnableOrganizationMembers", + "documentation": "

Specifies whether automated sensitive data discovery is enabled automatically for accounts in the organization. Possible values are: ALL, enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts.

" + }, "classificationScopeId": { "shape": "ClassificationScopeId", "locationName": "classificationScopeId", - "documentation": "

The unique identifier for the classification scope that's used when performing automated sensitive data discovery for the account. The classification scope specifies S3 buckets to exclude from automated sensitive data discovery.

" + "documentation": "

The unique identifier for the classification scope that's used when performing automated sensitive data discovery. The classification scope specifies S3 buckets to exclude from analyses.

" }, "disabledAt": { "shape": "Timestamp", "locationName": "disabledAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled for the account. This value is null if automated sensitive data discovery wasn't enabled and subsequently disabled for the account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled. This value is null if automated sensitive data discovery is currently enabled.

" }, "firstEnabledAt": { "shape": "Timestamp", "locationName": "firstEnabledAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled for the account. This value is null if automated sensitive data discovery has never been enabled for the account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled. This value is null if automated sensitive data discovery has never been enabled.

" }, "lastUpdatedAt": { "shape": "Timestamp", "locationName": "lastUpdatedAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently enabled or disabled for the account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the configuration settings or status of automated sensitive data discovery was most recently changed.

" }, "sensitivityInspectionTemplateId": { "shape": "SensitivityInspectionTemplateId", "locationName": "sensitivityInspectionTemplateId", - "documentation": "

The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery for the account. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data.

" + "documentation": "

The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data.

" }, "status": { "shape": "AutomatedDiscoveryStatus", "locationName": "status", - "documentation": "

The current status of the automated sensitive data discovery configuration for the account. Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

" + "documentation": "

The current status of automated sensitive data discovery for the organization or account. Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities; and, DISABLED, don't perform automated sensitive data discovery activities.

" } } }, @@ -5905,7 +6124,8 @@ }, "GetClassificationExportConfigurationRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetClassificationExportConfigurationResponse": { "type": "structure", @@ -6129,7 +6349,8 @@ }, "GetFindingsPublicationConfigurationRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetFindingsPublicationConfigurationResponse": { "type": "structure", @@ -6171,7 +6392,8 @@ }, "GetInvitationsCountRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetInvitationsCountResponse": { "type": "structure", @@ -6185,7 +6407,8 @@ }, "GetMacieSessionRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetMacieSessionResponse": { "type": "structure", @@ -6219,7 +6442,8 @@ }, "GetMasterAccountRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetMasterAccountResponse": { "type": "structure", @@ -6336,7 +6560,8 @@ }, "GetRevealConfigurationRequest": { "type": "structure", - "members": {} + "members": { + } }, "GetRevealConfigurationResponse": { "type": "structure", @@ -6378,7 +6603,7 @@ "reasons": { "shape": "__listOfUnavailabilityReasonCode", "locationName": "reasons", - "documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you\u2019re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can\u2019t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

" + "documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

" } } }, @@ -6441,12 +6666,12 @@ "excludes": { "shape": "SensitivityInspectionTemplateExcludes", "locationName": "excludes", - "documentation": "

The managed data identifiers that are explicitly excluded (not used) when analyzing data.

" + "documentation": "

The managed data identifiers that are explicitly excluded (not used) when performing automated sensitive data discovery.

" }, "includes": { "shape": "SensitivityInspectionTemplateIncludes", "locationName": "includes", - "documentation": "

The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when analyzing data.

" + "documentation": "

The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when performing automated sensitive data discovery.

" }, "name": { "shape": "__string", @@ -6764,17 +6989,17 @@ "isDefinedInJob": { "shape": "IsDefinedInJob", "locationName": "isDefinedInJob", - "documentation": "

Specifies whether any one-time or recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" + "documentation": "

Specifies whether any one-time or recurring jobs are configured to analyze objects in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" }, "isMonitoredByJob": { "shape": "IsMonitoredByJob", "locationName": "isMonitoredByJob", - "documentation": "

Specifies whether any recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" + "documentation": "

Specifies whether any recurring jobs are configured to analyze objects in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" }, "lastJobId": { "shape": "__string", "locationName": "lastJobId", - "documentation": "

The unique identifier for the job that ran most recently and is configured to analyze data in the bucket, either the latest run of a recurring job or the only run of a one-time job.

This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

" + "documentation": "

The unique identifier for the job that ran most recently and is configured to analyze objects in the bucket, either the latest run of a recurring job or the only run of a one-time job.

This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

" }, "lastJobRunTime": { "shape": "__timestampIso8601", @@ -6782,7 +7007,7 @@ "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job (lastJobId) started. If the job is a recurring job, this value indicates when the most recent run started.

This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

" } }, - "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in an S3 bucket, and, if so, the details of the job that ran most recently.

" + "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze objects in an S3 bucket, and, if so, the details of the job that ran most recently.

" }, "JobScheduleFrequency": { "type": "structure", @@ -6977,6 +7202,44 @@ } } }, + "ListAutomatedDiscoveryAccountsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "accountIds", + "documentation": "

The Amazon Web Services account ID for each account, for as many as 50 accounts. To retrieve the status for multiple accounts, append the accountIds parameter and argument for each account, separated by an ampersand (&). To retrieve the status for all the accounts in an organization, omit this parameter.

" + }, + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of a paginated response.

" + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + } + } + }, + "ListAutomatedDiscoveryAccountsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "__listOfAutomatedDiscoveryAccount", + "locationName": "items", + "documentation": "

An array of objects, one for each account specified in the request. Each object specifies the Amazon Web Services account ID for an account and the current status of automated sensitive data discovery for that account.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, "ListClassificationJobsRequest": { "type": "structure", "members": { @@ -7526,6 +7789,11 @@ "locationName": "accountId", "documentation": "

The unique identifier for the Amazon Web Services account that owns the bucket.

" }, + "automatedDiscoveryMonitoringStatus": { + "shape": "AutomatedDiscoveryMonitoringStatus", + "locationName": "automatedDiscoveryMonitoringStatus", + "documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED.

" + }, "bucketName": { "shape": "__string", "locationName": "bucketName", @@ -7559,7 +7827,7 @@ "lastAutomatedDiscoveryTime": { "shape": "__timestampIso8601", "locationName": "lastAutomatedDiscoveryTime", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

" }, "objectCount": { "shape": "__long", @@ -7574,7 +7842,7 @@ "sensitivityScore": { "shape": "__integer", "locationName": "sensitivityScore", - "documentation": "

The current sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account.

" + "documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

" }, "sizeInBytes": { "shape": "__long", @@ -7865,7 +8133,8 @@ }, "PutFindingsPublicationConfigurationResponse": { "type": "structure", - "members": {} + "members": { + } }, "Range": { "type": "structure", @@ -7989,7 +8258,7 @@ "documentation": "

Specifies whether Amazon Macie found sensitive data in the object.

" } }, - "documentation": "

Provides information about an S3 object that Amazon Macie selected for analysis while performing automated sensitive data discovery for an S3 bucket, and the status and results of the analysis. This information is available only if automated sensitive data discovery is currently enabled for your account.

", + "documentation": "

Provides information about an S3 object that Amazon Macie selected for analysis while performing automated sensitive data discovery for an account, and the status and results of the analysis. This information is available only if automated sensitive data discovery has been enabled for the account.

", "required": [ "classificationResultStatus", "arn" @@ -8044,7 +8313,7 @@ "documentation": "

The total number of objects that Amazon Macie wasn't able to analyze in the bucket due to the permissions settings for the objects or the permissions settings for the keys that were used to encrypt the objects.

" } }, - "documentation": "

Provides statistical data for sensitive data discovery metrics that apply to an S3 bucket that Amazon Macie monitors and analyzes for your account. The statistics capture the results of automated sensitive data discovery activities that Macie has performed for the bucket. The data is available only if automated sensitive data discovery is currently enabled for your account.

" + "documentation": "

Provides statistical data for sensitive data discovery metrics that apply to an S3 bucket that Amazon Macie monitors and analyzes for an account, if automated sensitive data discovery has been enabled for the account. The data captures the results of automated sensitive data discovery activities that Macie has performed for the bucket.

" }, "ResourcesAffected": { "type": "structure", @@ -8301,7 +8570,7 @@ "bucketName": { "shape": "__string", "locationName": "bucketName", - "documentation": "

The name of the bucket.

" + "documentation": "

The name of the bucket. This must be the name of an existing general purpose bucket.

" }, "keyPrefix": { "shape": "__string", @@ -8560,7 +8829,7 @@ "values": { "shape": "__listOf__string", "locationName": "values", - "documentation": "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

  • ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource.

  • S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket.

  • S3_BUCKET_NAME - A string that represents the name of an S3 bucket.

  • S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket.

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values.

" + "documentation": "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

  • ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource.

  • AUTOMATED_DISCOVERY_MONITORING_STATUS - A string that represents an enumerated value that Macie defines for the BucketMetadata.automatedDiscoveryMonitoringStatus property of an S3 bucket.

  • S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket.

  • S3_BUCKET_NAME - A string that represents the name of an S3 bucket.

  • S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket.

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values.

" } }, "documentation": "

Specifies a property-based filter condition that determines which Amazon Web Services resources are included or excluded from the query results.

" @@ -8572,7 +8841,8 @@ "ACCOUNT_ID", "S3_BUCKET_NAME", "S3_BUCKET_EFFECTIVE_PERMISSION", - "S3_BUCKET_SHARED_ACCESS" + "S3_BUCKET_SHARED_ACCESS", + "AUTOMATED_DISCOVERY_MONITORING_STATUS" ] }, "SearchResourcesSortAttributeName": { @@ -8736,7 +9006,7 @@ "documentation": "

An array of unique identifiers, one for each managed data identifier to exclude. To retrieve a list of valid values, use the ListManagedDataIdentifiers operation.

" } }, - "documentation": "

Specifies managed data identifiers to exclude (not use) when performing automated sensitive data discovery for an Amazon Macie account. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" + "documentation": "

Specifies managed data identifiers to exclude (not use) when performing automated sensitive data discovery. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" }, "SensitivityInspectionTemplateId": { "type": "string", @@ -8761,7 +9031,7 @@ "documentation": "

An array of unique identifiers, one for each managed data identifier to include.

Amazon Macie uses these managed data identifiers in addition to managed data identifiers that are subsequently released and recommended for automated sensitive data discovery. To retrieve a list of valid values for the managed data identifiers that are currently available, use the ListManagedDataIdentifiers operation.

" } }, - "documentation": "

Specifies the allow lists, custom data identifiers, and managed data identifiers to include (use) when performing automated sensitive data discovery for an Amazon Macie account. The configuration must specify at least one custom data identifier or managed data identifier. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" + "documentation": "

Specifies the allow lists, custom data identifiers, and managed data identifiers to include (use) when performing automated sensitive data discovery. The configuration must specify at least one custom data identifier or managed data identifier. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" }, "SensitivityInspectionTemplatesEntry": { "type": "structure", @@ -8777,7 +9047,7 @@ "documentation": "

The name of the sensitivity inspection template: automated-sensitive-data-discovery.

" } }, - "documentation": "

Provides information about the sensitivity inspection template for an Amazon Macie account. Macie uses the template's settings when it performs automated sensitive data discovery for the account.

" + "documentation": "

Provides information about the sensitivity inspection template for an Amazon Macie account.

" }, "ServerSideEncryption": { "type": "structure", @@ -9134,7 +9404,8 @@ }, "TagResourceResponse": { "type": "structure", - "members": {} + "members": { + } }, "TagScopeTerm": { "type": "structure", @@ -9295,7 +9566,7 @@ "message": { "shape": "__string", "locationName": "message", - "documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you\u2019re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can\u2019t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

" + "documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

" } }, "documentation": "

Provides information about an error that occurred due to an unprocessable entity.

", @@ -9351,7 +9622,8 @@ }, "UntagResourceResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateAllowListRequest": { "type": "structure", @@ -9402,10 +9674,15 @@ "UpdateAutomatedDiscoveryConfigurationRequest": { "type": "structure", "members": { + "autoEnableOrganizationMembers": { + "shape": "AutoEnableMode", + "locationName": "autoEnableOrganizationMembers", + "documentation": "

Specifies whether to automatically enable automated sensitive data discovery for accounts in the organization. Valid values are: ALL (default), enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts.

If you specify NEW or NONE, automated sensitive data discovery continues to be enabled for any existing accounts that it's currently enabled for. To enable or disable it for individual member accounts, specify NEW or NONE, and then enable or disable it for each account by using the BatchUpdateAutomatedDiscoveryAccounts operation.

" + }, "status": { "shape": "AutomatedDiscoveryStatus", "locationName": "status", - "documentation": "

The new status of automated sensitive data discovery for the account. Valid values are: ENABLED, start or resume automated sensitive data discovery activities for the account; and, DISABLED, stop performing automated sensitive data discovery activities for the account.

When you enable automated sensitive data discovery for the first time, Amazon Macie uses default configuration settings to determine which data sources to analyze and which managed data identifiers to use. To change these settings, use the UpdateClassificationScope and UpdateSensitivityInspectionTemplate operations, respectively. If you change the settings and subsequently disable the configuration, Amazon Macie retains your changes.

" + "documentation": "

The new status of automated sensitive data discovery for the organization or account. Valid values are: ENABLED, start or resume all automated sensitive data discovery activities; and, DISABLED, stop performing all automated sensitive data discovery activities.

If you specify DISABLED for an administrator account, you also disable automated sensitive data discovery for all member accounts in the organization.

" } }, "required": [ @@ -9414,7 +9691,8 @@ }, "UpdateAutomatedDiscoveryConfigurationResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateClassificationJobRequest": { "type": "structure", @@ -9438,7 +9716,8 @@ }, "UpdateClassificationJobResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateClassificationScopeRequest": { "type": "structure", @@ -9461,7 +9740,8 @@ }, "UpdateClassificationScopeResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateFindingsFilterRequest": { "type": "structure", @@ -9540,7 +9820,8 @@ }, "UpdateMacieSessionResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateMemberSessionRequest": { "type": "structure", @@ -9564,7 +9845,8 @@ }, "UpdateMemberSessionResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateOrganizationConfigurationRequest": { "type": "structure", @@ -9572,7 +9854,7 @@ "autoEnable": { "shape": "__boolean", "locationName": "autoEnable", - "documentation": "

Specifies whether to enable Amazon Macie automatically for an account when the account is added to the organization in Organizations.

" + "documentation": "

Specifies whether to enable Amazon Macie automatically for accounts that are added to the organization in Organizations.

" } }, "required": [ @@ -9581,7 +9863,8 @@ }, "UpdateOrganizationConfigurationResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateResourceProfileDetectionsRequest": { "type": "structure", @@ -9604,7 +9887,8 @@ }, "UpdateResourceProfileDetectionsResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateResourceProfileRequest": { "type": "structure", @@ -9627,7 +9911,8 @@ }, "UpdateResourceProfileResponse": { "type": "structure", - "members": {} + "members": { + } }, "UpdateRetrievalConfiguration": { "type": "structure", @@ -9692,7 +9977,7 @@ "excludes": { "shape": "SensitivityInspectionTemplateExcludes", "locationName": "excludes", - "documentation": "

The managed data identifiers to explicitly exclude (not use) when analyzing data.

To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively.

" + "documentation": "

The managed data identifiers to explicitly exclude (not use) when performing automated sensitive data discovery.

To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively.

" }, "id": { "shape": "__string", @@ -9703,7 +9988,7 @@ "includes": { "shape": "SensitivityInspectionTemplateIncludes", "locationName": "includes", - "documentation": "

The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when analyzing data.

" + "documentation": "

The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when performing automated sensitive data discovery.

" } }, "required": [ @@ -9712,7 +9997,8 @@ }, "UpdateSensitivityInspectionTemplateResponse": { "type": "structure", - "members": {} + "members": { + } }, "UsageByAccount": { "type": "structure", @@ -9751,7 +10037,7 @@ "automatedDiscoveryFreeTrialStartDate": { "shape": "__timestampIso8601", "locationName": "automatedDiscoveryFreeTrialStartDate", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. If the account is a member account in an organization, this value is the same as the value for the organization's Amazon Macie administrator account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. This value is null if automated sensitive data discovery hasn't been enabled for the account.

" }, "freeTrialStartDate": { "shape": "__timestampIso8601", @@ -10009,6 +10295,24 @@ "shape": "AllowListSummary" } }, + "__listOfAutomatedDiscoveryAccount": { + "type": "list", + "member": { + "shape": "AutomatedDiscoveryAccount" + } + }, + "__listOfAutomatedDiscoveryAccountUpdate": { + "type": "list", + "member": { + "shape": "AutomatedDiscoveryAccountUpdate" + } + }, + "__listOfAutomatedDiscoveryAccountUpdateError": { + "type": "list", + "member": { + "shape": "AutomatedDiscoveryAccountUpdateError" + } + }, "__listOfBatchGetCustomDataIdentifierSummary": { "type": "list", "member": { diff --git a/botocore/data/mailmanager/2023-10-17/endpoint-rule-set-1.json b/botocore/data/mailmanager/2023-10-17/endpoint-rule-set-1.json new file mode 100644 index 0000000000..17751d95d6 --- /dev/null +++ b/botocore/data/mailmanager/2023-10-17/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://mail-manager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://mail-manager.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/mailmanager/2023-10-17/paginators-1.json b/botocore/data/mailmanager/2023-10-17/paginators-1.json new file mode 100644 index 0000000000..70309f1c2d --- /dev/null +++ b/botocore/data/mailmanager/2023-10-17/paginators-1.json @@ -0,0 +1,58 @@ +{ + "pagination": { + "ListAddonInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "AddonInstances" + }, + "ListAddonSubscriptions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "AddonSubscriptions" + }, + "ListArchiveExports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "Exports" + }, + "ListArchiveSearches": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "Searches" + }, + "ListArchives": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "Archives" + }, + "ListIngressPoints": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "IngressPoints" + }, + "ListRelays": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "Relays" + }, + "ListRuleSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "RuleSets" + }, + "ListTrafficPolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "TrafficPolicies" + } + } +} diff --git 
a/botocore/data/mailmanager/2023-10-17/service-2.json b/botocore/data/mailmanager/2023-10-17/service-2.json new file mode 100644 index 0000000000..1991fd06c2 --- /dev/null +++ b/botocore/data/mailmanager/2023-10-17/service-2.json @@ -0,0 +1,4056 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-10-17", + "endpointPrefix":"mail-manager", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + "serviceFullName":"MailManager", + "serviceId":"MailManager", + "signatureVersion":"v4", + "signingName":"ses", + "targetPrefix":"MailManagerSvc", + "uid":"mailmanager-2023-10-17" + }, + "operations":{ + "CreateAddonInstance":{ + "name":"CreateAddonInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAddonInstanceRequest"}, + "output":{"shape":"CreateAddonInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates an Add On instance for the subscription indicated in the request. The resulting Amazon Resource Name (ARN) can be used in a conditional statement for a rule set or traffic policy.

", + "idempotent":true + }, + "CreateAddonSubscription":{ + "name":"CreateAddonSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAddonSubscriptionRequest"}, + "output":{"shape":"CreateAddonSubscriptionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a subscription for an Add On representing the acceptance of its terms of use and additional pricing. The subscription can then be used to create an instance for use in rule sets or traffic policies.

", + "idempotent":true + }, + "CreateArchive":{ + "name":"CreateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateArchiveRequest"}, + "output":{"shape":"CreateArchiveResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates a new email archive resource for storing and retaining emails.

", + "idempotent":true + }, + "CreateIngressPoint":{ + "name":"CreateIngressPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIngressPointRequest"}, + "output":{"shape":"CreateIngressPointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Provision a new ingress endpoint resource.

", + "idempotent":true + }, + "CreateRelay":{ + "name":"CreateRelay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRelayRequest"}, + "output":{"shape":"CreateRelayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a relay resource which can be used in rules to relay incoming emails to defined relay destinations.

", + "idempotent":true + }, + "CreateRuleSet":{ + "name":"CreateRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleSetRequest"}, + "output":{"shape":"CreateRuleSetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Provision a new rule set.

", + "idempotent":true + }, + "CreateTrafficPolicy":{ + "name":"CreateTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrafficPolicyRequest"}, + "output":{"shape":"CreateTrafficPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Provision a new traffic policy resource.

", + "idempotent":true + }, + "DeleteAddonInstance":{ + "name":"DeleteAddonInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAddonInstanceRequest"}, + "output":{"shape":"DeleteAddonInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes an Add On instance.

", + "idempotent":true + }, + "DeleteAddonSubscription":{ + "name":"DeleteAddonSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAddonSubscriptionRequest"}, + "output":{"shape":"DeleteAddonSubscriptionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes an Add On subscription.

", + "idempotent":true + }, + "DeleteArchive":{ + "name":"DeleteArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteArchiveRequest"}, + "output":{"shape":"DeleteArchiveResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Initiates deletion of an email archive. This changes the archive state to pending deletion. In this state, no new emails can be added, and existing archived emails become inaccessible (search, export, download). The archive and all of its contents will be permanently deleted 30 days after entering the pending deletion state, regardless of the configured retention period.

", + "idempotent":true + }, + "DeleteIngressPoint":{ + "name":"DeleteIngressPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIngressPointRequest"}, + "output":{"shape":"DeleteIngressPointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Delete an ingress endpoint resource.

", + "idempotent":true + }, + "DeleteRelay":{ + "name":"DeleteRelay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRelayRequest"}, + "output":{"shape":"DeleteRelayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes an existing relay resource.

", + "idempotent":true + }, + "DeleteRuleSet":{ + "name":"DeleteRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleSetRequest"}, + "output":{"shape":"DeleteRuleSetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Delete a rule set.

", + "idempotent":true + }, + "DeleteTrafficPolicy":{ + "name":"DeleteTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTrafficPolicyRequest"}, + "output":{"shape":"DeleteTrafficPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Delete a traffic policy resource.

", + "idempotent":true + }, + "GetAddonInstance":{ + "name":"GetAddonInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAddonInstanceRequest"}, + "output":{"shape":"GetAddonInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets detailed information about an Add On instance.

" + }, + "GetAddonSubscription":{ + "name":"GetAddonSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAddonSubscriptionRequest"}, + "output":{"shape":"GetAddonSubscriptionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets detailed information about an Add On subscription.

" + }, + "GetArchive":{ + "name":"GetArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveRequest"}, + "output":{"shape":"GetArchiveResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the full details and current state of a specified email archive.

" + }, + "GetArchiveExport":{ + "name":"GetArchiveExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveExportRequest"}, + "output":{"shape":"GetArchiveExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the details and current status of a specific email archive export job.

" + }, + "GetArchiveMessage":{ + "name":"GetArchiveMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveMessageRequest"}, + "output":{"shape":"GetArchiveMessageResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a pre-signed URL that provides temporary download access to the specific email message stored in the archive.

" + }, + "GetArchiveMessageContent":{ + "name":"GetArchiveMessageContent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveMessageContentRequest"}, + "output":{"shape":"GetArchiveMessageContentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the textual content of a specific email message stored in the archive. Attachments are not included.

" + }, + "GetArchiveSearch":{ + "name":"GetArchiveSearch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveSearchRequest"}, + "output":{"shape":"GetArchiveSearchResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the details and current status of a specific email archive search job.

" + }, + "GetArchiveSearchResults":{ + "name":"GetArchiveSearchResults", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetArchiveSearchResultsRequest"}, + "output":{"shape":"GetArchiveSearchResultsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the results of a completed email archive search job.

" + }, + "GetIngressPoint":{ + "name":"GetIngressPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIngressPointRequest"}, + "output":{"shape":"GetIngressPointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Fetch ingress endpoint resource attributes.

" + }, + "GetRelay":{ + "name":"GetRelay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRelayRequest"}, + "output":{"shape":"GetRelayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Fetch the relay resource and its attributes.

" + }, + "GetRuleSet":{ + "name":"GetRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRuleSetRequest"}, + "output":{"shape":"GetRuleSetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Fetch attributes of a rule set.

" + }, + "GetTrafficPolicy":{ + "name":"GetTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTrafficPolicyRequest"}, + "output":{"shape":"GetTrafficPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Fetch attributes of a traffic policy resource.

" + }, + "ListAddonInstances":{ + "name":"ListAddonInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAddonInstancesRequest"}, + "output":{"shape":"ListAddonInstancesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all Add On instances in your account.

" + }, + "ListAddonSubscriptions":{ + "name":"ListAddonSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAddonSubscriptionsRequest"}, + "output":{"shape":"ListAddonSubscriptionsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all Add On subscriptions in your account.

" + }, + "ListArchiveExports":{ + "name":"ListArchiveExports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArchiveExportsRequest"}, + "output":{"shape":"ListArchiveExportsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of email archive export jobs.

" + }, + "ListArchiveSearches":{ + "name":"ListArchiveSearches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArchiveSearchesRequest"}, + "output":{"shape":"ListArchiveSearchesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of email archive search jobs.

" + }, + "ListArchives":{ + "name":"ListArchives", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArchivesRequest"}, + "output":{"shape":"ListArchivesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of all email archives in your account.

" + }, + "ListIngressPoints":{ + "name":"ListIngressPoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIngressPointsRequest"}, + "output":{"shape":"ListIngressPointsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

List all ingress endpoint resources.

" + }, + "ListRelays":{ + "name":"ListRelays", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRelaysRequest"}, + "output":{"shape":"ListRelaysResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all the existing relay resources.

" + }, + "ListRuleSets":{ + "name":"ListRuleSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleSetsRequest"}, + "output":{"shape":"ListRuleSetsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

List rule sets for this account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the list of tags (keys and values) assigned to the resource.

" + }, + "ListTrafficPolicies":{ + "name":"ListTrafficPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTrafficPoliciesRequest"}, + "output":{"shape":"ListTrafficPoliciesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

List traffic policy resources.

" + }, + "StartArchiveExport":{ + "name":"StartArchiveExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartArchiveExportRequest"}, + "output":{"shape":"StartArchiveExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Initiates an export of emails from the specified archive.

" + }, + "StartArchiveSearch":{ + "name":"StartArchiveSearch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartArchiveSearchRequest"}, + "output":{"shape":"StartArchiveSearchResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Initiates a search across emails in the specified archive.

" + }, + "StopArchiveExport":{ + "name":"StopArchiveExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopArchiveExportRequest"}, + "output":{"shape":"StopArchiveExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Stops an in-progress export of emails from an archive.

" + }, + "StopArchiveSearch":{ + "name":"StopArchiveSearch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopArchiveSearchRequest"}, + "output":{"shape":"StopArchiveSearchResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Stops an in-progress archive search job.

", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds one or more tags (keys and values) to a specified resource.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Remove one or more tags (keys and values) from a specified resource.

", + "idempotent":true + }, + "UpdateArchive":{ + "name":"UpdateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateArchiveRequest"}, + "output":{"shape":"UpdateArchiveResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the attributes of an existing email archive.

", + "idempotent":true + }, + "UpdateIngressPoint":{ + "name":"UpdateIngressPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIngressPointRequest"}, + "output":{"shape":"UpdateIngressPointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update attributes of a provisioned ingress endpoint resource.

", + "idempotent":true + }, + "UpdateRelay":{ + "name":"UpdateRelay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRelayRequest"}, + "output":{"shape":"UpdateRelayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates the attributes of an existing relay resource.

", + "idempotent":true + }, + "UpdateRuleSet":{ + "name":"UpdateRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleSetRequest"}, + "output":{"shape":"UpdateRuleSetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update attributes of an already provisioned rule set.

", + "idempotent":true + }, + "UpdateTrafficPolicy":{ + "name":"UpdateTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTrafficPolicyRequest"}, + "output":{"shape":"UpdateTrafficPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update attributes of an already provisioned traffic policy resource.

", + "idempotent":true + } + }, + "shapes":{ + "AcceptAction":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Occurs when a user is denied access to a specific resource or action.

", + "exception":true + }, + "ActionFailurePolicy":{ + "type":"string", + "enum":[ + "CONTINUE", + "DROP" + ] + }, + "AddHeaderAction":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{ + "shape":"HeaderName", + "documentation":"

The name of the header to add to an email. The header must be prefixed with \"X-\". Headers are added regardless of whether the header name pre-existed in the email.

" + }, + "HeaderValue":{ + "shape":"HeaderValue", + "documentation":"

The value of the header to add to the email.

" + } + }, + "documentation":"

The action to add a header to a message. When executed, this action will add the given header to the message.

" + }, + "AddonInstance":{ + "type":"structure", + "members":{ + "AddonInstanceArn":{ + "shape":"AddonInstanceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Add On instance.

" + }, + "AddonInstanceId":{ + "shape":"AddonInstanceId", + "documentation":"

The unique ID of the Add On instance.

" + }, + "AddonName":{ + "shape":"AddonName", + "documentation":"

The name of the Add On for the instance.

" + }, + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The subscription ID for the instance.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the Add On instance was created.

" + } + }, + "documentation":"

An Add On instance represents a specific configuration of an Add On.

" + }, + "AddonInstanceArn":{"type":"string"}, + "AddonInstanceId":{ + "type":"string", + "max":67, + "min":4, + "pattern":"^ai-[a-zA-Z0-9]{1,64}$" + }, + "AddonInstances":{ + "type":"list", + "member":{"shape":"AddonInstance"} + }, + "AddonName":{"type":"string"}, + "AddonSubscription":{ + "type":"structure", + "members":{ + "AddonName":{ + "shape":"AddonName", + "documentation":"

The name of the Add On.

" + }, + "AddonSubscriptionArn":{ + "shape":"AddonSubscriptionArn", + "documentation":"

The Amazon Resource Name (ARN) of the Add On subscription.

" + }, + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The unique ID of the Add On subscription.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the Add On subscription was created.

" + } + }, + "documentation":"

A subscription for an Add On representing the acceptance of its terms of use and additional pricing.

" + }, + "AddonSubscriptionArn":{"type":"string"}, + "AddonSubscriptionId":{ + "type":"string", + "max":67, + "min":4, + "pattern":"^as-[a-zA-Z0-9]{1,64}$" + }, + "AddonSubscriptions":{ + "type":"list", + "member":{"shape":"AddonSubscription"} + }, + "Analysis":{ + "type":"structure", + "required":[ + "Analyzer", + "ResultField" + ], + "members":{ + "Analyzer":{ + "shape":"AnalyzerArn", + "documentation":"

The Amazon Resource Name (ARN) of an Add On.

" + }, + "ResultField":{ + "shape":"ResultField", + "documentation":"

The returned value from an Add On.

" + } + }, + "documentation":"

The result of an analysis can be used in conditions to trigger actions. Analyses can inspect the email content and report a certain aspect of the email.

" + }, + "AnalyzerArn":{ + "type":"string", + "pattern":"^[a-zA-Z0-9:_/+=,@.#-]+$" + }, + "Archive":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The unique identifier of the archive.

" + }, + "ArchiveName":{ + "shape":"ArchiveNameString", + "documentation":"

The unique name assigned to the archive.

" + }, + "ArchiveState":{ + "shape":"ArchiveState", + "documentation":"

The current state of the archive:

  • ACTIVE – The archive is ready and available for use.

  • PENDING_DELETION – The archive has been marked for deletion and will be permanently deleted in 30 days. No further modifications can be made in this state.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the archive was last updated.

" + } + }, + "documentation":"

An archive resource for storing and retaining emails.

" + }, + "ArchiveAction":{ + "type":"structure", + "required":["TargetArchive"], + "members":{ + "ActionFailurePolicy":{ + "shape":"ActionFailurePolicy", + "documentation":"

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified archive has been deleted.

" + }, + "TargetArchive":{ + "shape":"NameOrArn", + "documentation":"

The identifier of the archive to send the email to.

" + } + }, + "documentation":"

The action to archive the email by delivering the email to an Amazon SES archive.

" + }, + "ArchiveArn":{"type":"string"}, + "ArchiveBooleanEmailAttribute":{ + "type":"string", + "enum":["HAS_ATTACHMENTS"] + }, + "ArchiveBooleanExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator" + ], + "members":{ + "Evaluate":{ + "shape":"ArchiveBooleanToEvaluate", + "documentation":"

The email attribute value to evaluate.

" + }, + "Operator":{ + "shape":"ArchiveBooleanOperator", + "documentation":"

The boolean operator to use for evaluation.

" + } + }, + "documentation":"

A boolean expression to evaluate email attribute values.

" + }, + "ArchiveBooleanOperator":{ + "type":"string", + "enum":[ + "IS_TRUE", + "IS_FALSE" + ] + }, + "ArchiveBooleanToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"ArchiveBooleanEmailAttribute", + "documentation":"

The name of the email attribute to evaluate.

" + } + }, + "documentation":"

The attribute to evaluate in a boolean expression.

", + "union":true + }, + "ArchiveFilterCondition":{ + "type":"structure", + "members":{ + "BooleanExpression":{ + "shape":"ArchiveBooleanExpression", + "documentation":"

A boolean expression to evaluate against email attributes.

" + }, + "StringExpression":{ + "shape":"ArchiveStringExpression", + "documentation":"

A string expression to evaluate against email attributes.

" + } + }, + "documentation":"

A filter condition used to include or exclude emails when exporting from or searching an archive.

", + "union":true + }, + "ArchiveFilterConditions":{ + "type":"list", + "member":{"shape":"ArchiveFilterCondition"}, + "max":10, + "min":0 + }, + "ArchiveFilters":{ + "type":"structure", + "members":{ + "Include":{ + "shape":"ArchiveFilterConditions", + "documentation":"

The filter conditions for emails to include.

" + }, + "Unless":{ + "shape":"ArchiveFilterConditions", + "documentation":"

The filter conditions for emails to exclude.

" + } + }, + "documentation":"

A set of filter conditions to include and/or exclude emails.

" + }, + "ArchiveId":{ + "type":"string", + "max":66, + "min":3, + "pattern":"^a-[\\w]{1,64}$" + }, + "ArchiveIdString":{ + "type":"string", + "max":66, + "min":1 + }, + "ArchiveNameString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]$" + }, + "ArchiveRetention":{ + "type":"structure", + "members":{ + "RetentionPeriod":{ + "shape":"RetentionPeriod", + "documentation":"

The enum value sets the period for retaining emails in an archive.

" + } + }, + "documentation":"

The retention policy for an email archive that specifies how long emails are kept before being automatically deleted.

", + "union":true + }, + "ArchiveState":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING_DELETION" + ] + }, + "ArchiveStringEmailAttribute":{ + "type":"string", + "enum":[ + "TO", + "FROM", + "CC", + "SUBJECT" + ] + }, + "ArchiveStringExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"ArchiveStringToEvaluate", + "documentation":"

The attribute of the email to evaluate.

" + }, + "Operator":{ + "shape":"ArchiveStringOperator", + "documentation":"

The operator to use when evaluating the string values.

" + }, + "Values":{ + "shape":"StringValueList", + "documentation":"

The list of string values to evaluate the email attribute against.

" + } + }, + "documentation":"

A string expression to evaluate an email attribute value against one or more string values.

" + }, + "ArchiveStringOperator":{ + "type":"string", + "enum":["CONTAINS"] + }, + "ArchiveStringToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"ArchiveStringEmailAttribute", + "documentation":"

The name of the email attribute to evaluate.

" + } + }, + "documentation":"

Specifies the email attribute to evaluate in a string expression.

", + "union":true + }, + "ArchivedMessageId":{"type":"string"}, + "ArchivesList":{ + "type":"list", + "member":{"shape":"Archive"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request configuration has conflicts. For details, see the accompanying error message.

", + "exception":true + }, + "CreateAddonInstanceRequest":{ + "type":"structure", + "required":["AddonSubscriptionId"], + "members":{ + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The unique ID of a previously created subscription that an Add On instance is created for. You can only have one instance per subscription.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "CreateAddonInstanceResponse":{ + "type":"structure", + "required":["AddonInstanceId"], + "members":{ + "AddonInstanceId":{ + "shape":"AddonInstanceId", + "documentation":"

The unique ID of the Add On instance created by this API.

" + } + } + }, + "CreateAddonSubscriptionRequest":{ + "type":"structure", + "required":["AddonName"], + "members":{ + "AddonName":{ + "shape":"AddonName", + "documentation":"

The name of the Add On to subscribe to. You can only have one subscription for each Add On name.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "CreateAddonSubscriptionResponse":{ + "type":"structure", + "required":["AddonSubscriptionId"], + "members":{ + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The unique ID of the Add On subscription created by this API.

" + } + } + }, + "CreateArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveNameString", + "documentation":"

A unique name for the new archive.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token Amazon SES uses to recognize retries of this request.

", + "idempotencyToken":true + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key for encrypting emails in the archive.

" + }, + "Retention":{ + "shape":"ArchiveRetention", + "documentation":"

The period for retaining emails in the archive before automatic deletion.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + }, + "documentation":"

The request to create a new email archive.

" + }, + "CreateArchiveResponse":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The unique identifier for the newly created archive.

" + } + }, + "documentation":"

The response from creating a new email archive.

" + }, + "CreateIngressPointRequest":{ + "type":"structure", + "required":[ + "IngressPointName", + "RuleSetId", + "TrafficPolicyId", + "Type" + ], + "members":{ + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "IngressPointConfiguration":{ + "shape":"IngressPointConfiguration", + "documentation":"

If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret ARN.

" + }, + "IngressPointName":{ + "shape":"IngressPointName", + "documentation":"

A user friendly name for an ingress endpoint resource.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of an existing rule set that you attach to an ingress endpoint resource.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of an existing traffic policy that you attach to an ingress endpoint resource.

" + }, + "Type":{ + "shape":"IngressPointType", + "documentation":"

The type of the ingress endpoint to create.

" + } + } + }, + "CreateIngressPointResponse":{ + "type":"structure", + "required":["IngressPointId"], + "members":{ + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The unique identifier for a previously created ingress endpoint.

" + } + } + }, + "CreateRelayRequest":{ + "type":"structure", + "required":[ + "Authentication", + "RelayName", + "ServerName", + "ServerPort" + ], + "members":{ + "Authentication":{ + "shape":"RelayAuthentication", + "documentation":"

Authentication for the relay destination server—specify the secret ARN where the SMTP credentials are stored.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "RelayName":{ + "shape":"RelayName", + "documentation":"

The unique name of the relay resource.

" + }, + "ServerName":{ + "shape":"RelayServerName", + "documentation":"

The destination relay server address.

" + }, + "ServerPort":{ + "shape":"RelayServerPort", + "documentation":"

The destination relay server port.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "CreateRelayResponse":{ + "type":"structure", + "required":["RelayId"], + "members":{ + "RelayId":{ + "shape":"RelayId", + "documentation":"

A unique identifier of the created relay resource.

" + } + } + }, + "CreateRuleSetRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "Rules" + ], + "members":{ + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "RuleSetName":{ + "shape":"RuleSetName", + "documentation":"

A user-friendly name for the rule set.

" + }, + "Rules":{ + "shape":"Rules", + "documentation":"

Conditional rules that are evaluated for determining actions on email.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "CreateRuleSetResponse":{ + "type":"structure", + "required":["RuleSetId"], + "members":{ + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of the created rule set.

" + } + } + }, + "CreateTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "DefaultAction", + "PolicyStatements", + "TrafficPolicyName" + ], + "members":{ + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique token that Amazon SES uses to recognize subsequent retries of the same request.

", + "idempotencyToken":true + }, + "DefaultAction":{ + "shape":"AcceptAction", + "documentation":"

Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements

" + }, + "MaxMessageSizeBytes":{ + "shape":"MaxMessageSizeBytes", + "documentation":"

The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked.

" + }, + "PolicyStatements":{ + "shape":"PolicyStatementList", + "documentation":"

Conditional statements for filtering email traffic.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + }, + "TrafficPolicyName":{ + "shape":"TrafficPolicyName", + "documentation":"

A user-friendly name for the traffic policy resource.

" + } + } + }, + "CreateTrafficPolicyResponse":{ + "type":"structure", + "required":["TrafficPolicyId"], + "members":{ + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy resource.

" + } + } + }, + "DeleteAddonInstanceRequest":{ + "type":"structure", + "required":["AddonInstanceId"], + "members":{ + "AddonInstanceId":{ + "shape":"AddonInstanceId", + "documentation":"

The Add On instance ID to delete.

" + } + } + }, + "DeleteAddonInstanceResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAddonSubscriptionRequest":{ + "type":"structure", + "required":["AddonSubscriptionId"], + "members":{ + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The Add On subscription ID to delete.

" + } + } + }, + "DeleteAddonSubscriptionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteArchiveRequest":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The identifier of the archive to delete.

" + } + }, + "documentation":"

The request to initiate deletion of an email archive.

" + }, + "DeleteArchiveResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The response indicating if the archive deletion was successfully initiated.

On success, returns an HTTP 200 status code. On failure, returns an error message.

" + }, + "DeleteIngressPointRequest":{ + "type":"structure", + "required":["IngressPointId"], + "members":{ + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The identifier of the ingress endpoint resource that you want to delete.

" + } + } + }, + "DeleteIngressPointResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRelayRequest":{ + "type":"structure", + "required":["RelayId"], + "members":{ + "RelayId":{ + "shape":"RelayId", + "documentation":"

The unique relay identifier.

" + } + } + }, + "DeleteRelayResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRuleSetRequest":{ + "type":"structure", + "required":["RuleSetId"], + "members":{ + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of an existing rule set resource to delete.

" + } + } + }, + "DeleteRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrafficPolicyRequest":{ + "type":"structure", + "required":["TrafficPolicyId"], + "members":{ + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy that you want to delete.

" + } + } + }, + "DeleteTrafficPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeliverToMailboxAction":{ + "type":"structure", + "required":[ + "MailboxArn", + "RoleArn" + ], + "members":{ + "ActionFailurePolicy":{ + "shape":"ActionFailurePolicy", + "documentation":"

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the mailbox ARN is no longer valid.

" + }, + "MailboxArn":{ + "shape":"NameOrArn", + "documentation":"

The Amazon Resource Name (ARN) of a WorkMail organization to deliver the email to.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role to use to execute this action. The role must have access to the workmail:DeliverToMailbox API.

" + } + }, + "documentation":"

This action delivers an email to a mailbox.

" + }, + "Double":{ + "type":"double", + "box":true + }, + "DropAction":{ + "type":"structure", + "members":{ + }, + "documentation":"

This action causes processing to stop and the email to be dropped. If the action applies only to certain recipients, only those recipients are dropped, and processing continues for other recipients.

" + }, + "EmailAddress":{ + "type":"string", + "max":254, + "min":0, + "pattern":"^[0-9A-Za-z@+.-]+$", + "sensitive":true + }, + "EmailReceivedHeadersList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ErrorMessage":{"type":"string"}, + "ExportDestinationConfiguration":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"S3ExportDestinationConfiguration", + "documentation":"

Configuration for delivering to an Amazon S3 bucket.

" + } + }, + "documentation":"

The destination configuration for delivering exported email data.

", + "union":true + }, + "ExportId":{ + "type":"string", + "max":64, + "min":1 + }, + "ExportMaxResults":{ + "type":"integer", + "box":true + }, + "ExportState":{ + "type":"string", + "enum":[ + "QUEUED", + "PREPROCESSING", + "PROCESSING", + "COMPLETED", + "FAILED", + "CANCELLED" + ] + }, + "ExportStatus":{ + "type":"structure", + "members":{ + "CompletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the export job completed (if finished).

" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

An error message if the export job failed.

" + }, + "State":{ + "shape":"ExportState", + "documentation":"

The current state of the export job.

" + }, + "SubmissionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the export job was submitted.

" + } + }, + "documentation":"

The current status of an archive export job.

" + }, + "ExportSummary":{ + "type":"structure", + "members":{ + "ExportId":{ + "shape":"ExportId", + "documentation":"

The unique identifier of the export job.

" + }, + "Status":{ + "shape":"ExportStatus", + "documentation":"

The current status of the export job.

" + } + }, + "documentation":"

Summary statuses of an archive export job.

" + }, + "ExportSummaryList":{ + "type":"list", + "member":{"shape":"ExportSummary"} + }, + "GetAddonInstanceRequest":{ + "type":"structure", + "required":["AddonInstanceId"], + "members":{ + "AddonInstanceId":{ + "shape":"AddonInstanceId", + "documentation":"

The Add On instance ID to retrieve information for.

" + } + } + }, + "GetAddonInstanceResponse":{ + "type":"structure", + "members":{ + "AddonInstanceArn":{ + "shape":"AddonInstanceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Add On instance.

" + }, + "AddonName":{ + "shape":"AddonName", + "documentation":"

The name of the Add On provider associated to the subscription of the instance.

" + }, + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The subscription ID associated to the instance.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the Add On instance was created.

" + } + } + }, + "GetAddonSubscriptionRequest":{ + "type":"structure", + "required":["AddonSubscriptionId"], + "members":{ + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"

The Add On subscription ID to retrieve information for.

" + } + } + }, + "GetAddonSubscriptionResponse":{ + "type":"structure", + "members":{ + "AddonName":{ + "shape":"AddonName", + "documentation":"

The name of the Add On for the subscription.

" + }, + "AddonSubscriptionArn":{ + "shape":"AddonSubscriptionArn", + "documentation":"

Amazon Resource Name (ARN) for the subscription.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the Add On subscription was created.

" + } + } + }, + "GetArchiveExportRequest":{ + "type":"structure", + "required":["ExportId"], + "members":{ + "ExportId":{ + "shape":"ExportId", + "documentation":"

The identifier of the export job to get details for.

" + } + }, + "documentation":"

The request to retrieve details of a specific archive export job.

" + }, + "GetArchiveExportResponse":{ + "type":"structure", + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive the email export was performed from.

" + }, + "ExportDestinationConfiguration":{ + "shape":"ExportDestinationConfiguration", + "documentation":"

Where the exported emails are being delivered.

" + }, + "Filters":{ + "shape":"ArchiveFilters", + "documentation":"

The criteria used to filter emails included in the export.

" + }, + "FromTimestamp":{ + "shape":"Timestamp", + "documentation":"

The start of the timestamp range the exported emails cover.

" + }, + "MaxResults":{ + "shape":"ExportMaxResults", + "documentation":"

The maximum number of email items included in the export.

" + }, + "Status":{ + "shape":"ExportStatus", + "documentation":"

The current status of the export job.

" + }, + "ToTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end of the date range the exported emails cover.

" + } + }, + "documentation":"

The response containing details of the specified archive export job.

" + }, + "GetArchiveMessageContentRequest":{ + "type":"structure", + "required":["ArchivedMessageId"], + "members":{ + "ArchivedMessageId":{ + "shape":"ArchivedMessageId", + "documentation":"

The unique identifier of the archived email message.

" + } + }, + "documentation":"

The request to get the textual content of a specific email message stored in an archive.

" + }, + "GetArchiveMessageContentResponse":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"MessageBody", + "documentation":"

The textual body content of the email message.

" + } + }, + "documentation":"

The response containing the textual content of the requested archived email message.

" + }, + "GetArchiveMessageRequest":{ + "type":"structure", + "required":["ArchivedMessageId"], + "members":{ + "ArchivedMessageId":{ + "shape":"ArchivedMessageId", + "documentation":"

The unique identifier of the archived email message.

" + } + }, + "documentation":"

The request to get details of a specific email message stored in an archive.

" + }, + "GetArchiveMessageResponse":{ + "type":"structure", + "members":{ + "MessageDownloadLink":{ + "shape":"S3PresignedURL", + "documentation":"

A pre-signed URL to temporarily download the full message content.

" + } + }, + "documentation":"

The response containing details about the requested archived email message.

" + }, + "GetArchiveRequest":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The identifier of the archive to retrieve.

" + } + }, + "documentation":"

The request to retrieve details of an email archive.

" + }, + "GetArchiveResponse":{ + "type":"structure", + "required":[ + "ArchiveArn", + "ArchiveId", + "ArchiveName", + "ArchiveState", + "Retention" + ], + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The Amazon Resource Name (ARN) of the archive.

" + }, + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The unique identifier of the archive.

" + }, + "ArchiveName":{ + "shape":"ArchiveNameString", + "documentation":"

The unique name assigned to the archive.

" + }, + "ArchiveState":{ + "shape":"ArchiveState", + "documentation":"

The current state of the archive:

  • ACTIVE – The archive is ready and available for use.

  • PENDING_DELETION – The archive has been marked for deletion and will be permanently deleted in 30 days. No further modifications can be made in this state.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the archive was created.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the archive.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the archive was modified.

" + }, + "Retention":{ + "shape":"ArchiveRetention", + "documentation":"

The retention period for emails in this archive.

" + } + }, + "documentation":"

The response containing details of the requested archive.

" + }, + "GetArchiveSearchRequest":{ + "type":"structure", + "required":["SearchId"], + "members":{ + "SearchId":{ + "shape":"SearchId", + "documentation":"

The identifier of the search job to get details for.

" + } + }, + "documentation":"

The request to retrieve details of a specific archive search job.

" + }, + "GetArchiveSearchResponse":{ + "type":"structure", + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive the email search was performed in.

" + }, + "Filters":{ + "shape":"ArchiveFilters", + "documentation":"

The criteria used to filter emails included in the search.

" + }, + "FromTimestamp":{ + "shape":"Timestamp", + "documentation":"

The start timestamp of the range the searched emails cover.

" + }, + "MaxResults":{ + "shape":"SearchMaxResults", + "documentation":"

The maximum number of search results to return.

" + }, + "Status":{ + "shape":"SearchStatus", + "documentation":"

The current status of the search job.

" + }, + "ToTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end timestamp of the range the searched emails cover.

" + } + }, + "documentation":"

The response containing details of the specified archive search job.

" + }, + "GetArchiveSearchResultsRequest":{ + "type":"structure", + "required":["SearchId"], + "members":{ + "SearchId":{ + "shape":"SearchId", + "documentation":"

The identifier of the completed search job.

" + } + }, + "documentation":"

The request to retrieve results from a completed archive search job.

" + }, + "GetArchiveSearchResultsResponse":{ + "type":"structure", + "members":{ + "Rows":{ + "shape":"RowsList", + "documentation":"

The list of email result objects matching the search criteria.

" + } + }, + "documentation":"

The response containing search results from a completed archive search.

" + }, + "GetIngressPointRequest":{ + "type":"structure", + "required":["IngressPointId"], + "members":{ + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The identifier of an ingress endpoint.

" + } + } + }, + "GetIngressPointResponse":{ + "type":"structure", + "required":[ + "IngressPointId", + "IngressPointName" + ], + "members":{ + "ARecord":{ + "shape":"IngressPointARecord", + "documentation":"

The DNS A Record that identifies your ingress endpoint. Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the ingress endpoint was created.

" + }, + "IngressPointArn":{ + "shape":"IngressPointArn", + "documentation":"

The Amazon Resource Name (ARN) of the ingress endpoint resource.

" + }, + "IngressPointAuthConfiguration":{ + "shape":"IngressPointAuthConfiguration", + "documentation":"

The authentication configuration of the ingress endpoint resource.

" + }, + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The identifier of an ingress endpoint resource.

" + }, + "IngressPointName":{ + "shape":"IngressPointName", + "documentation":"

A user friendly name for the ingress endpoint.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the ingress endpoint was last updated.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of a rule set resource associated with the ingress endpoint.

" + }, + "Status":{ + "shape":"IngressPointStatus", + "documentation":"

The status of the ingress endpoint resource.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy resource associated with the ingress endpoint.

" + }, + "Type":{ + "shape":"IngressPointType", + "documentation":"

The type of ingress endpoint.

" + } + } + }, + "GetRelayRequest":{ + "type":"structure", + "required":["RelayId"], + "members":{ + "RelayId":{ + "shape":"RelayId", + "documentation":"

A unique relay identifier.

" + } + } + }, + "GetRelayResponse":{ + "type":"structure", + "required":["RelayId"], + "members":{ + "Authentication":{ + "shape":"RelayAuthentication", + "documentation":"

The authentication attribute—contains the secret ARN where the customer relay server credentials are stored.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the relay was created.

" + }, + "LastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the relay was last updated.

" + }, + "RelayArn":{ + "shape":"RelayArn", + "documentation":"

The Amazon Resource Name (ARN) of the relay.

" + }, + "RelayId":{ + "shape":"RelayId", + "documentation":"

The unique relay identifier.

" + }, + "RelayName":{ + "shape":"RelayName", + "documentation":"

The unique name of the relay.

" + }, + "ServerName":{ + "shape":"RelayServerName", + "documentation":"

The destination relay server address.

" + }, + "ServerPort":{ + "shape":"RelayServerPort", + "documentation":"

The destination relay server port.

" + } + } + }, + "GetRuleSetRequest":{ + "type":"structure", + "required":["RuleSetId"], + "members":{ + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of an existing rule set to be retrieved.

" + } + } + }, + "GetRuleSetResponse":{ + "type":"structure", + "required":[ + "CreatedDate", + "LastModificationDate", + "RuleSetArn", + "RuleSetId", + "RuleSetName", + "Rules" + ], + "members":{ + "CreatedDate":{ + "shape":"Timestamp", + "documentation":"

The date of when the rule set was created.

" + }, + "LastModificationDate":{ + "shape":"Timestamp", + "documentation":"

The date of when the rule set was last modified.

" + }, + "RuleSetArn":{ + "shape":"RuleSetArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule set resource.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of the rule set resource.

" + }, + "RuleSetName":{ + "shape":"RuleSetName", + "documentation":"

A user-friendly name for the rule set resource.

" + }, + "Rules":{ + "shape":"Rules", + "documentation":"

The rules contained in the rule set.

" + } + } + }, + "GetTrafficPolicyRequest":{ + "type":"structure", + "required":["TrafficPolicyId"], + "members":{ + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy resource.

" + } + } + }, + "GetTrafficPolicyResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyId", + "TrafficPolicyName" + ], + "members":{ + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the traffic policy was created.

" + }, + "DefaultAction":{ + "shape":"AcceptAction", + "documentation":"

The default action of the traffic policy.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the traffic policy was last updated.

" + }, + "MaxMessageSizeBytes":{ + "shape":"MaxMessageSizeBytes", + "documentation":"

The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked.

" + }, + "PolicyStatements":{ + "shape":"PolicyStatementList", + "documentation":"

The list of conditions which are in the traffic policy resource.

" + }, + "TrafficPolicyArn":{ + "shape":"TrafficPolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the traffic policy resource.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy resource.

" + }, + "TrafficPolicyName":{ + "shape":"TrafficPolicyName", + "documentation":"

A user-friendly name for the traffic policy resource.

" + } + } + }, + "HeaderName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[xX]\\-[a-zA-Z0-9\\-]+$" + }, + "HeaderValue":{ + "type":"string", + "max":128, + "min":1 + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^[a-zA-Z0-9:_/+=,@.#-]+$" + }, + "IdOrArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[a-zA-Z0-9:_/+=,@.#-]+$" + }, + "IdempotencyToken":{ + "type":"string", + "max":128, + "min":1 + }, + "IngressAnalysis":{ + "type":"structure", + "required":[ + "Analyzer", + "ResultField" + ], + "members":{ + "Analyzer":{ + "shape":"AnalyzerArn", + "documentation":"

The Amazon Resource Name (ARN) of an Add On.

" + }, + "ResultField":{ + "shape":"ResultField", + "documentation":"

The returned value from an Add On.

" + } + }, + "documentation":"

The Add On ARN and its returned value that is evaluated in a policy statement's conditional expression to either deny or block the incoming email.

" + }, + "IngressBooleanExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator" + ], + "members":{ + "Evaluate":{ + "shape":"IngressBooleanToEvaluate", + "documentation":"

The operand on which to perform a boolean condition operation.

" + }, + "Operator":{ + "shape":"IngressBooleanOperator", + "documentation":"

The matching operator for a boolean condition expression.

" + } + }, + "documentation":"

The structure for a boolean condition matching on the incoming mail.

" + }, + "IngressBooleanOperator":{ + "type":"string", + "enum":[ + "IS_TRUE", + "IS_FALSE" + ] + }, + "IngressBooleanToEvaluate":{ + "type":"structure", + "members":{ + "Analysis":{ + "shape":"IngressAnalysis", + "documentation":"

The structure type for a boolean condition stating the Add On ARN and its returned value.

" + } + }, + "documentation":"

The union type representing the allowed types of operands for a boolean condition.

", + "union":true + }, + "IngressIpOperator":{ + "type":"string", + "enum":[ + "CIDR_MATCHES", + "NOT_CIDR_MATCHES" + ] + }, + "IngressIpToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"IngressIpv4Attribute", + "documentation":"

An enum type representing the allowed attribute types for an IP condition.

" + } + }, + "documentation":"

The structure for an IP based condition matching on the incoming mail.

", + "union":true + }, + "IngressIpv4Attribute":{ + "type":"string", + "enum":["SENDER_IP"] + }, + "IngressIpv4Expression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"IngressIpToEvaluate", + "documentation":"

The left hand side argument of an IP condition expression.

" + }, + "Operator":{ + "shape":"IngressIpOperator", + "documentation":"

The matching operator for an IP condition expression.

" + }, + "Values":{ + "shape":"Ipv4Cidrs", + "documentation":"

The right hand side argument of an IP condition expression.

" + } + }, + "documentation":"

The union type representing the allowed types for the left hand side of an IP condition.

" + }, + "IngressPoint":{ + "type":"structure", + "required":[ + "IngressPointId", + "IngressPointName", + "Status", + "Type" + ], + "members":{ + "ARecord":{ + "shape":"IngressPointARecord", + "documentation":"

The DNS A Record that identifies your ingress endpoint. Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager.

" + }, + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The identifier of the ingress endpoint resource.

" + }, + "IngressPointName":{ + "shape":"IngressPointName", + "documentation":"

A user friendly name for the ingress endpoint resource.

" + }, + "Status":{ + "shape":"IngressPointStatus", + "documentation":"

The status of the ingress endpoint resource.

" + }, + "Type":{ + "shape":"IngressPointType", + "documentation":"

The type of ingress endpoint resource.

" + } + }, + "documentation":"

The structure of an ingress endpoint resource.

" + }, + "IngressPointARecord":{"type":"string"}, + "IngressPointArn":{"type":"string"}, + "IngressPointAuthConfiguration":{ + "type":"structure", + "members":{ + "IngressPointPasswordConfiguration":{ + "shape":"IngressPointPasswordConfiguration", + "documentation":"

The ingress endpoint password configuration for the ingress endpoint resource.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The ingress endpoint SecretsManager::Secret ARN configuration for the ingress endpoint resource.

" + } + }, + "documentation":"

The authentication configuration for the ingress endpoint resource.

" + }, + "IngressPointConfiguration":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The SecretsManager::Secret ARN of the ingress endpoint resource.

" + }, + "SmtpPassword":{ + "shape":"SmtpPassword", + "documentation":"

The password of the ingress endpoint resource.

" + } + }, + "documentation":"

The configuration of the ingress endpoint resource.

", + "union":true + }, + "IngressPointId":{ + "type":"string", + "max":100, + "min":1 + }, + "IngressPointName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[A-Za-z0-9_\\-]+$" + }, + "IngressPointPasswordConfiguration":{ + "type":"structure", + "members":{ + "PreviousSmtpPasswordExpiryTimestamp":{ + "shape":"Timestamp", + "documentation":"

The previous password expiry timestamp of the ingress endpoint resource.

" + }, + "PreviousSmtpPasswordVersion":{ + "shape":"String", + "documentation":"

The previous password version of the ingress endpoint resource.

" + }, + "SmtpPasswordVersion":{ + "shape":"String", + "documentation":"

The current password version of the ingress endpoint resource.

" + } + }, + "documentation":"

The password configuration of the ingress endpoint resource.

" + }, + "IngressPointStatus":{ + "type":"string", + "enum":[ + "PROVISIONING", + "DEPROVISIONING", + "UPDATING", + "ACTIVE", + "CLOSED", + "FAILED" + ] + }, + "IngressPointStatusToUpdate":{ + "type":"string", + "enum":[ + "ACTIVE", + "CLOSED" + ] + }, + "IngressPointType":{ + "type":"string", + "enum":[ + "OPEN", + "AUTH" + ] + }, + "IngressPointsList":{ + "type":"list", + "member":{"shape":"IngressPoint"} + }, + "IngressStringEmailAttribute":{ + "type":"string", + "enum":["RECIPIENT"] + }, + "IngressStringExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"IngressStringToEvaluate", + "documentation":"

The left hand side argument of a string condition expression.

" + }, + "Operator":{ + "shape":"IngressStringOperator", + "documentation":"

The matching operator for a string condition expression.

" + }, + "Values":{ + "shape":"StringList", + "documentation":"

The right hand side argument of a string condition expression.

" + } + }, + "documentation":"

The structure for a string based condition matching on the incoming mail.

" + }, + "IngressStringOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS" + ] + }, + "IngressStringToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"IngressStringEmailAttribute", + "documentation":"

The enum type representing the allowed attribute types for a string condition.

" + } + }, + "documentation":"

The union type representing the allowed types for the left hand side of a string condition.

", + "union":true + }, + "IngressTlsAttribute":{ + "type":"string", + "enum":["TLS_PROTOCOL"] + }, + "IngressTlsProtocolAttribute":{ + "type":"string", + "enum":[ + "TLS1_2", + "TLS1_3" + ] + }, + "IngressTlsProtocolExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Value" + ], + "members":{ + "Evaluate":{ + "shape":"IngressTlsProtocolToEvaluate", + "documentation":"

The left hand side argument of a TLS condition expression.

" + }, + "Operator":{ + "shape":"IngressTlsProtocolOperator", + "documentation":"

The matching operator for a TLS condition expression.

" + }, + "Value":{ + "shape":"IngressTlsProtocolAttribute", + "documentation":"

The right hand side argument of a TLS condition expression.

" + } + }, + "documentation":"

The structure for a TLS related condition matching on the incoming mail.

" + }, + "IngressTlsProtocolOperator":{ + "type":"string", + "enum":[ + "MINIMUM_TLS_VERSION", + "IS" + ] + }, + "IngressTlsProtocolToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"IngressTlsAttribute", + "documentation":"

The enum type representing the allowed attribute types for the TLS condition.

" + } + }, + "documentation":"

The union type representing the allowed types for the left hand side of a TLS condition.

", + "union":true + }, + "Integer":{ + "type":"integer", + "box":true + }, + "Ipv4Cidr":{ + "type":"string", + "pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/([0-9]|[12][0-9]|3[0-2])$" + }, + "Ipv4Cidrs":{ + "type":"list", + "member":{"shape":"Ipv4Cidr"} + }, + "KmsKeyArn":{ + "type":"string", + "pattern":"^arn:aws(|-cn|-us-gov):kms:[a-z0-9-]{1,20}:[0-9]{12}:(key|alias)/.+$" + }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^[a-zA-Z0-9-:/]+$" + }, + "ListAddonInstancesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of Add On instances that are returned per call. You can use NextToken to obtain further Add On instances.

" + } + } + }, + "ListAddonInstancesResponse":{ + "type":"structure", + "members":{ + "AddonInstances":{ + "shape":"AddonInstances", + "documentation":"

The list of Add On instances.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListAddonSubscriptionsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of Add On subscriptions that are returned per call. You can use NextToken to obtain further Add On subscriptions.

" + } + } + }, + "ListAddonSubscriptionsResponse":{ + "type":"structure", + "members":{ + "AddonSubscriptions":{ + "shape":"AddonSubscriptions", + "documentation":"

The list of Add On subscriptions.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListArchiveExportsRequest":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of archive export jobs that are returned per call. You can use NextToken to obtain further pages of archives.

" + } + }, + "documentation":"

The request to list archive export jobs in your account.

" + }, + "ListArchiveExportsResponse":{ + "type":"structure", + "members":{ + "Exports":{ + "shape":"ExportSummaryList", + "documentation":"

The list of export job identifiers and statuses.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If present, use to retrieve the next page of results.

" + } + }, + "documentation":"

The response containing a list of archive export jobs and their statuses.

" + }, + "ListArchiveSearchesRequest":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of archive search jobs that are returned per call. You can use NextToken to obtain further pages of archives.

" + } + }, + "documentation":"

The request to list archive search jobs in your account.

" + }, + "ListArchiveSearchesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If present, use to retrieve the next page of results.

" + }, + "Searches":{ + "shape":"SearchSummaryList", + "documentation":"

The list of search job identifiers and statuses.

" + } + }, + "documentation":"

The response containing a list of archive search jobs and their statuses.

" + }, + "ListArchivesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of archives that are returned per call. You can use NextToken to obtain further pages of archives.

" + } + }, + "documentation":"

The request to list email archives in your account.

" + }, + "ListArchivesResponse":{ + "type":"structure", + "required":["Archives"], + "members":{ + "Archives":{ + "shape":"ArchivesList", + "documentation":"

The list of archive details.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If present, use to retrieve the next page of results.

" + } + }, + "documentation":"

The response containing a list of your email archives.

" + }, + "ListIngressPointsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of ingress endpoint resources that are returned per call. You can use NextToken to obtain further ingress endpoints.

" + } + } + }, + "ListIngressPointsResponse":{ + "type":"structure", + "members":{ + "IngressPoints":{ + "shape":"IngressPointsList", + "documentation":"

The list of ingress endpoints.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListRelaysRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"Integer", + "documentation":"

The number of relays to be returned in one request.

" + } + } + }, + "ListRelaysResponse":{ + "type":"structure", + "required":["Relays"], + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "Relays":{ + "shape":"Relays", + "documentation":"

The list of returned relays.

" + } + } + }, + "ListRuleSetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of rule set resources that are returned per call. You can use NextToken to obtain further rule sets.

" + } + } + }, + "ListRuleSetsResponse":{ + "type":"structure", + "required":["RuleSets"], + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "RuleSets":{ + "shape":"RuleSets", + "documentation":"

The list of rule sets.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to retrieve tags from.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "ListTrafficPoliciesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of traffic policy resources that are returned per call. You can use NextToken to obtain further traffic policies.

" + } + } + }, + "ListTrafficPoliciesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "TrafficPolicies":{ + "shape":"TrafficPolicyList", + "documentation":"

The list of traffic policies.

" + } + } + }, + "MailFrom":{ + "type":"string", + "enum":[ + "REPLACE", + "PRESERVE" + ] + }, + "MaxMessageSizeBytes":{ + "type":"integer", + "box":true, + "min":1 + }, + "MessageBody":{ + "type":"structure", + "members":{ + "Html":{ + "shape":"String", + "documentation":"

The HTML body content of the message.

" + }, + "MessageMalformed":{ + "shape":"Boolean", + "documentation":"

A flag indicating if the email was malformed.

" + }, + "Text":{ + "shape":"String", + "documentation":"

The plain text body content of the message.

" + } + }, + "documentation":"

The textual body content of an email message.

" + }, + "NameOrArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[a-zA-Z0-9:_/+=,@.#-]+$" + }, + "NoAuthentication":{ + "type":"structure", + "members":{ + }, + "documentation":"

Explicitly indicate that the relay destination server does not require SMTP credential authentication.

" + }, + "PageSize":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "min":1 + }, + "PolicyCondition":{ + "type":"structure", + "members":{ + "BooleanExpression":{ + "shape":"IngressBooleanExpression", + "documentation":"

This represents a boolean type condition matching on the incoming mail. It performs the boolean operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'.

" + }, + "IpExpression":{ + "shape":"IngressIpv4Expression", + "documentation":"

This represents an IP based condition matching on the incoming mail. It performs the operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'.

" + }, + "StringExpression":{ + "shape":"IngressStringExpression", + "documentation":"

This represents a string based condition matching on the incoming mail. It performs the string operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'.

" + }, + "TlsExpression":{ + "shape":"IngressTlsProtocolExpression", + "documentation":"

This represents a TLS based condition matching on the incoming mail. It performs the operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'.

" + } + }, + "documentation":"

The email traffic filtering conditions which are contained in a traffic policy resource.

", + "union":true + }, + "PolicyConditions":{ + "type":"list", + "member":{"shape":"PolicyCondition"}, + "min":1 + }, + "PolicyStatement":{ + "type":"structure", + "required":[ + "Action", + "Conditions" + ], + "members":{ + "Action":{ + "shape":"AcceptAction", + "documentation":"

The action that informs a traffic policy resource to either allow or block the email if it matches a condition in the policy statement.

" + }, + "Conditions":{ + "shape":"PolicyConditions", + "documentation":"

The list of conditions to apply to incoming messages for filtering email traffic.

" + } + }, + "documentation":"

The structure containing traffic policy conditions and actions.

" + }, + "PolicyStatementList":{ + "type":"list", + "member":{"shape":"PolicyStatement"} + }, + "Recipients":{ + "type":"list", + "member":{"shape":"EmailAddress"}, + "max":100, + "min":1 + }, + "Relay":{ + "type":"structure", + "members":{ + "LastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the relay was last modified.

" + }, + "RelayId":{ + "shape":"RelayId", + "documentation":"

The unique relay identifier.

" + }, + "RelayName":{ + "shape":"RelayName", + "documentation":"

The unique relay name.

" + } + }, + "documentation":"

The relay resource that can be used as a rule to relay receiving emails to the destination relay server.

" + }, + "RelayAction":{ + "type":"structure", + "required":["Relay"], + "members":{ + "ActionFailurePolicy":{ + "shape":"ActionFailurePolicy", + "documentation":"

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified relay has been deleted.

" + }, + "MailFrom":{ + "shape":"MailFrom", + "documentation":"

This action specifies whether to preserve or replace original mail from address while relaying received emails to a destination server.

" + }, + "Relay":{ + "shape":"IdOrArn", + "documentation":"

The identifier of the relay resource to be used when relaying an email.

" + } + }, + "documentation":"

The action relays the email via SMTP to another specific SMTP server.

" + }, + "RelayArn":{"type":"string"}, + "RelayAuthentication":{ + "type":"structure", + "members":{ + "NoAuthentication":{ + "shape":"NoAuthentication", + "documentation":"

Keep an empty structure if the relay destination server does not require SMTP credential authentication.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The ARN of the secret created in secrets manager where the relay server's SMTP credentials are stored.

" + } + }, + "documentation":"

Authentication for the relay destination server—specify the secretARN where the SMTP credentials are stored, or specify an empty NoAuthentication structure if the relay destination server does not require SMTP credential authentication.

", + "union":true + }, + "RelayId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "RelayName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "RelayServerName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9-\\.]+$" + }, + "RelayServerPort":{ + "type":"integer", + "box":true, + "max":65535, + "min":1 + }, + "Relays":{ + "type":"list", + "member":{"shape":"Relay"} + }, + "ReplaceRecipientAction":{ + "type":"structure", + "members":{ + "ReplaceWith":{ + "shape":"Recipients", + "documentation":"

This action specifies the replacement recipient email addresses to insert.

" + } + }, + "documentation":"

This action replaces the email envelope recipients with the given list of recipients. If the condition of this action applies only to a subset of recipients, only those recipients are replaced with the recipients specified in the action. The message contents and headers are unaffected by this action, only the envelope recipients are updated.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Occurs when a requested resource is not found.

", + "exception":true + }, + "ResultField":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\sa-zA-Z0-9_]+$" + }, + "RetentionPeriod":{ + "type":"string", + "enum":[ + "THREE_MONTHS", + "SIX_MONTHS", + "NINE_MONTHS", + "ONE_YEAR", + "EIGHTEEN_MONTHS", + "TWO_YEARS", + "THIRTY_MONTHS", + "THREE_YEARS", + "FOUR_YEARS", + "FIVE_YEARS", + "SIX_YEARS", + "SEVEN_YEARS", + "EIGHT_YEARS", + "NINE_YEARS", + "TEN_YEARS", + "PERMANENT" + ] + }, + "Row":{ + "type":"structure", + "members":{ + "ArchivedMessageId":{ + "shape":"ArchivedMessageId", + "documentation":"

The unique identifier of the archived message.

" + }, + "Cc":{ + "shape":"String", + "documentation":"

The email addresses in the CC header.

" + }, + "Date":{ + "shape":"String", + "documentation":"

The date the email was sent.

" + }, + "From":{ + "shape":"String", + "documentation":"

The email address of the sender.

" + }, + "HasAttachments":{ + "shape":"Boolean", + "documentation":"

A flag indicating if the email has attachments.

" + }, + "InReplyTo":{ + "shape":"String", + "documentation":"

The email message ID this is a reply to.

" + }, + "MessageId":{ + "shape":"String", + "documentation":"

The unique message ID of the email.

" + }, + "ReceivedHeaders":{ + "shape":"EmailReceivedHeadersList", + "documentation":"

The received headers from the email delivery path.

" + }, + "ReceivedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the email was received.

" + }, + "Subject":{ + "shape":"String", + "documentation":"

The subject header value of the email.

" + }, + "To":{ + "shape":"String", + "documentation":"

The email addresses in the To header.

" + }, + "XMailer":{ + "shape":"String", + "documentation":"

The user agent that sent the email.

" + }, + "XOriginalMailer":{ + "shape":"String", + "documentation":"

The original user agent that sent the email.

" + }, + "XPriority":{ + "shape":"String", + "documentation":"

The priority level of the email.

" + } + }, + "documentation":"

A result row containing metadata for an archived email message.

" + }, + "RowsList":{ + "type":"list", + "member":{"shape":"Row"} + }, + "Rule":{ + "type":"structure", + "required":["Actions"], + "members":{ + "Actions":{ + "shape":"RuleActions", + "documentation":"

The list of actions to execute when the conditions match the incoming email, and none of the \"unless conditions\" match.

" + }, + "Conditions":{ + "shape":"RuleConditions", + "documentation":"

The conditions of this rule. All conditions must match the email for the actions to be executed. An empty list of conditions means that all emails match, but are still subject to any \"unless conditions\"

" + }, + "Name":{ + "shape":"RuleName", + "documentation":"

The user-friendly name of the rule.

" + }, + "Unless":{ + "shape":"RuleConditions", + "documentation":"

The \"unless conditions\" of this rule. None of the conditions can match the email for the actions to be executed. If any of these conditions do match the email, then the actions are not executed.

" + } + }, + "documentation":"

A rule contains conditions, \"unless conditions\" and actions. For each envelope recipient of an email, if all conditions match and none of the \"unless conditions\" match, then all of the actions are executed sequentially. If no conditions are provided, the rule always applies and the actions are implicitly executed. If only \"unless conditions\" are provided, the rule applies if the email does not match the evaluation of the \"unless conditions\".

" + }, + "RuleAction":{ + "type":"structure", + "members":{ + "AddHeader":{ + "shape":"AddHeaderAction", + "documentation":"

This action adds a header. This can be used to add arbitrary email headers.

" + }, + "Archive":{ + "shape":"ArchiveAction", + "documentation":"

This action archives the email. This can be used to deliver an email to an archive.

" + }, + "DeliverToMailbox":{ + "shape":"DeliverToMailboxAction", + "documentation":"

This action delivers an email to a WorkMail mailbox.

" + }, + "Drop":{ + "shape":"DropAction", + "documentation":"

This action terminates the evaluation of rules in the rule set.

" + }, + "Relay":{ + "shape":"RelayAction", + "documentation":"

This action relays the email to another SMTP server.

" + }, + "ReplaceRecipient":{ + "shape":"ReplaceRecipientAction", + "documentation":"

The action replaces certain or all recipients with a different set of recipients.

" + }, + "Send":{ + "shape":"SendAction", + "documentation":"

This action sends the email to the internet.

" + }, + "WriteToS3":{ + "shape":"S3Action", + "documentation":"

This action writes the MIME content of the email to an S3 bucket.

" + } + }, + "documentation":"

The action for a rule to take. Only one of the contained actions can be set.

", + "union":true + }, + "RuleActions":{ + "type":"list", + "member":{"shape":"RuleAction"}, + "max":10, + "min":1 + }, + "RuleBooleanEmailAttribute":{ + "type":"string", + "enum":[ + "READ_RECEIPT_REQUESTED", + "TLS", + "TLS_WRAPPED" + ] + }, + "RuleBooleanExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator" + ], + "members":{ + "Evaluate":{ + "shape":"RuleBooleanToEvaluate", + "documentation":"

The operand on which to perform a boolean condition operation.

" + }, + "Operator":{ + "shape":"RuleBooleanOperator", + "documentation":"

The matching operator for a boolean condition expression.

" + } + }, + "documentation":"

A boolean expression to be used in a rule condition.

" + }, + "RuleBooleanOperator":{ + "type":"string", + "enum":[ + "IS_TRUE", + "IS_FALSE" + ] + }, + "RuleBooleanToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"RuleBooleanEmailAttribute", + "documentation":"

The boolean type representing the allowed attribute types for an email.

" + } + }, + "documentation":"

The union type representing the allowed types of operands for a boolean condition.

", + "union":true + }, + "RuleCondition":{ + "type":"structure", + "members":{ + "BooleanExpression":{ + "shape":"RuleBooleanExpression", + "documentation":"

The condition applies to a boolean expression passed in this field.

" + }, + "DmarcExpression":{ + "shape":"RuleDmarcExpression", + "documentation":"

The condition applies to a DMARC policy expression passed in this field.

" + }, + "IpExpression":{ + "shape":"RuleIpExpression", + "documentation":"

The condition applies to an IP address expression passed in this field.

" + }, + "NumberExpression":{ + "shape":"RuleNumberExpression", + "documentation":"

The condition applies to a number expression passed in this field.

" + }, + "StringExpression":{ + "shape":"RuleStringExpression", + "documentation":"

The condition applies to a string expression passed in this field.

" + }, + "VerdictExpression":{ + "shape":"RuleVerdictExpression", + "documentation":"

The condition applies to a verdict expression passed in this field.

" + } + }, + "documentation":"

The conditional expression used to evaluate an email for determining if a rule action should be taken.

", + "union":true + }, + "RuleConditions":{ + "type":"list", + "member":{"shape":"RuleCondition"}, + "max":10, + "min":0 + }, + "RuleDmarcExpression":{ + "type":"structure", + "required":[ + "Operator", + "Values" + ], + "members":{ + "Operator":{ + "shape":"RuleDmarcOperator", + "documentation":"

The operator to apply to the DMARC policy of the incoming email.

" + }, + "Values":{ + "shape":"RuleDmarcValueList", + "documentation":"

The values to use for the given DMARC policy operator. For the operator EQUALS, if multiple values are given, they are evaluated as an OR. That is, if any of the given values match, the condition is deemed to match. For the operator NOT_EQUALS, if multiple values are given, they are evaluated as an AND. That is, only if the email's DMARC policy is not equal to any of the given values, then the condition is deemed to match.

" + } + }, + "documentation":"

A DMARC policy expression. The condition matches if the given DMARC policy matches that of the incoming email.

" + }, + "RuleDmarcOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS" + ] + }, + "RuleDmarcPolicy":{ + "type":"string", + "enum":[ + "NONE", + "QUARANTINE", + "REJECT" + ] + }, + "RuleDmarcValueList":{ + "type":"list", + "member":{"shape":"RuleDmarcPolicy"}, + "max":10, + "min":1 + }, + "RuleIpEmailAttribute":{ + "type":"string", + "enum":["SOURCE_IP"] + }, + "RuleIpExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"RuleIpToEvaluate", + "documentation":"

The IP address to evaluate in this condition.

" + }, + "Operator":{ + "shape":"RuleIpOperator", + "documentation":"

The operator to evaluate the IP address.

" + }, + "Values":{ + "shape":"RuleIpValueList", + "documentation":"

The IP CIDR blocks in format \"x.y.z.w/n\" (eg 10.0.0.0/8) to match with the email's IP address. For the operator CIDR_MATCHES, if multiple values are given, they are evaluated as an OR. That is, if the IP address is contained within any of the given CIDR ranges, the condition is deemed to match. For NOT_CIDR_MATCHES, if multiple CIDR ranges are given, the condition is deemed to match if the IP address is not contained in any of the given CIDR ranges.

" + } + }, + "documentation":"

An IP address expression matching certain IP addresses within a given range of IP addresses.

" + }, + "RuleIpOperator":{ + "type":"string", + "enum":[ + "CIDR_MATCHES", + "NOT_CIDR_MATCHES" + ] + }, + "RuleIpStringValue":{ + "type":"string", + "max":18, + "min":1, + "pattern":"^(([0-9]|.|/)*)$" + }, + "RuleIpToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"RuleIpEmailAttribute", + "documentation":"

The attribute of the email to evaluate.

" + } + }, + "documentation":"

The IP address to evaluate for this condition.

", + "union":true + }, + "RuleIpValueList":{ + "type":"list", + "member":{"shape":"RuleIpStringValue"}, + "max":10, + "min":1 + }, + "RuleName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, + "RuleNumberEmailAttribute":{ + "type":"string", + "enum":["MESSAGE_SIZE"] + }, + "RuleNumberExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Value" + ], + "members":{ + "Evaluate":{ + "shape":"RuleNumberToEvaluate", + "documentation":"

The number to evaluate in a numeric condition expression.

" + }, + "Operator":{ + "shape":"RuleNumberOperator", + "documentation":"

The operator for a numeric condition expression.

" + }, + "Value":{ + "shape":"Double", + "documentation":"

The value to evaluate in a numeric condition expression.

" + } + }, + "documentation":"

A number expression to match numeric conditions with integers from the incoming email.

" + }, + "RuleNumberOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS", + "LESS_THAN", + "GREATER_THAN", + "LESS_THAN_OR_EQUAL", + "GREATER_THAN_OR_EQUAL" + ] + }, + "RuleNumberToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"RuleNumberEmailAttribute", + "documentation":"

An email attribute that is used as the number to evaluate.

" + } + }, + "documentation":"

The number to evaluate in a numeric condition expression.

", + "union":true + }, + "RuleSet":{ + "type":"structure", + "members":{ + "LastModificationDate":{ + "shape":"Timestamp", + "documentation":"

The last modification date of the rule set.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of the rule set.

" + }, + "RuleSetName":{ + "shape":"RuleSetName", + "documentation":"

A user-friendly name for the rule set.

" + } + }, + "documentation":"

A rule set contains a list of rules that are evaluated in order. Each rule is evaluated sequentially for each email.

" + }, + "RuleSetArn":{"type":"string"}, + "RuleSetId":{ + "type":"string", + "max":100, + "min":1 + }, + "RuleSetName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, + "RuleSets":{ + "type":"list", + "member":{"shape":"RuleSet"} + }, + "RuleStringEmailAttribute":{ + "type":"string", + "enum":[ + "MAIL_FROM", + "HELO", + "RECIPIENT", + "SENDER", + "FROM", + "SUBJECT", + "TO", + "CC" + ] + }, + "RuleStringExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"RuleStringToEvaluate", + "documentation":"

The string to evaluate in a string condition expression.

" + }, + "Operator":{ + "shape":"RuleStringOperator", + "documentation":"

The matching operator for a string condition expression.

" + }, + "Values":{ + "shape":"RuleStringList", + "documentation":"

The string(s) to be evaluated in a string condition expression. For all operators, except for NOT_EQUALS, if multiple values are given, the values are processed as an OR. That is, if any of the values match the email's string using the given operator, the condition is deemed to match. However, for NOT_EQUALS, the condition is only deemed to match if none of the given strings match the email's string.

" + } + }, + "documentation":"

A string expression is evaluated against strings or substrings of the email.

" + }, + "RuleStringList":{ + "type":"list", + "member":{"shape":"RuleStringValue"}, + "max":10, + "min":1 + }, + "RuleStringOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS" + ] + }, + "RuleStringToEvaluate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"RuleStringEmailAttribute", + "documentation":"

The email attribute to evaluate in a string condition expression.

" + } + }, + "documentation":"

The string to evaluate in a string condition expression.

", + "union":true + }, + "RuleStringValue":{ + "type":"string", + "max":4096, + "min":1 + }, + "RuleVerdict":{ + "type":"string", + "enum":[ + "PASS", + "FAIL", + "GRAY", + "PROCESSING_FAILED" + ] + }, + "RuleVerdictAttribute":{ + "type":"string", + "enum":[ + "SPF", + "DKIM" + ] + }, + "RuleVerdictExpression":{ + "type":"structure", + "required":[ + "Evaluate", + "Operator", + "Values" + ], + "members":{ + "Evaluate":{ + "shape":"RuleVerdictToEvaluate", + "documentation":"

The verdict to evaluate in a verdict condition expression.

" + }, + "Operator":{ + "shape":"RuleVerdictOperator", + "documentation":"

The matching operator for a verdict condition expression.

" + }, + "Values":{ + "shape":"RuleVerdictValueList", + "documentation":"

The values to match with the email's verdict using the given operator. For the EQUALS operator, if multiple values are given, the condition is deemed to match if any of the given verdicts match that of the email. For the NOT_EQUALS operator, if multiple values are given, the condition is deemed to match if none of the given verdicts match the verdict of the email.

" + } + }, + "documentation":"

A verdict expression is evaluated against verdicts of the email.

" + }, + "RuleVerdictOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS" + ] + }, + "RuleVerdictToEvaluate":{ + "type":"structure", + "members":{ + "Analysis":{ + "shape":"Analysis", + "documentation":"

The Add On ARN and its returned value to evaluate in a verdict condition expression.

" + }, + "Attribute":{ + "shape":"RuleVerdictAttribute", + "documentation":"

The email verdict attribute to evaluate in a string verdict expression.

" + } + }, + "documentation":"

The verdict to evaluate in a verdict condition expression.

", + "union":true + }, + "RuleVerdictValueList":{ + "type":"list", + "member":{"shape":"RuleVerdict"}, + "max":10, + "min":1 + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":40, + "min":0 + }, + "S3Action":{ + "type":"structure", + "required":[ + "RoleArn", + "S3Bucket" + ], + "members":{ + "ActionFailurePolicy":{ + "shape":"ActionFailurePolicy", + "documentation":"

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified bucket has been deleted.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM Role to use while writing to S3. This role must have access to the s3:PutObject, kms:Encrypt, and kms:GenerateDataKey APIs for the given bucket.

" + }, + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The bucket name of the S3 bucket to write to.

" + }, + "S3Prefix":{ + "shape":"S3Prefix", + "documentation":"

The S3 prefix to use for the write to the s3 bucket.

" + }, + "S3SseKmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The KMS Key ID to use to encrypt the message in S3.

" + } + }, + "documentation":"

Writes the MIME content of the email to an S3 bucket.

" + }, + "S3Bucket":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^[a-zA-Z0-9.-]+$" + }, + "S3ExportDestinationConfiguration":{ + "type":"structure", + "members":{ + "S3Location":{ + "shape":"S3Location", + "documentation":"

The S3 location to deliver the exported email data.

" + } + }, + "documentation":"

The configuration for exporting email data to an Amazon S3 bucket.

" + }, + "S3Location":{ + "type":"string", + "pattern":"^s3://[a-zA-Z0-9.-]{3,63}(/[a-zA-Z0-9!_.*'()/-]*)*$" + }, + "S3Prefix":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^[a-zA-Z0-9!_.*'()/-]+$" + }, + "S3PresignedURL":{"type":"string"}, + "SearchId":{ + "type":"string", + "max":64, + "min":1 + }, + "SearchMaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":0 + }, + "SearchState":{ + "type":"string", + "enum":[ + "QUEUED", + "RUNNING", + "COMPLETED", + "FAILED", + "CANCELLED" + ] + }, + "SearchStatus":{ + "type":"structure", + "members":{ + "CompletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the search completed (if finished).

" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

An error message if the search failed.

" + }, + "State":{ + "shape":"SearchState", + "documentation":"

The current state of the search job.

" + }, + "SubmissionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the search was submitted.

" + } + }, + "documentation":"

The current status of an archive search job.

" + }, + "SearchSummary":{ + "type":"structure", + "members":{ + "SearchId":{ + "shape":"SearchId", + "documentation":"

The unique identifier of the search job.

" + }, + "Status":{ + "shape":"SearchStatus", + "documentation":"

The current status of the search job.

" + } + }, + "documentation":"

Summary details of an archive search job.

" + }, + "SearchSummaryList":{ + "type":"list", + "member":{"shape":"SearchSummary"} + }, + "SecretArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-cn|aws-us-gov):secretsmanager:[a-z0-9-]+:\\d{12}:secret:[a-zA-Z0-9/_+=,.@-]+$" + }, + "SendAction":{ + "type":"structure", + "required":["RoleArn"], + "members":{ + "ActionFailurePolicy":{ + "shape":"ActionFailurePolicy", + "documentation":"

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the caller does not have the permissions to call the sendRawEmail API.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role to use for this action. This role must have access to the ses:SendRawEmail API.

" + } + }, + "documentation":"

Sends the email to the internet using the ses:SendRawEmail API.

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Occurs when an operation exceeds a predefined service quota or limit.

", + "exception":true + }, + "SmtpPassword":{ + "type":"string", + "max":64, + "min":8, + "pattern":"^[A-Za-z0-9!@#$%^&*()_+\\-=\\[\\]{}|.,?]+$", + "sensitive":true + }, + "StartArchiveExportRequest":{ + "type":"structure", + "required":[ + "ArchiveId", + "ExportDestinationConfiguration", + "FromTimestamp", + "ToTimestamp" + ], + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive to export emails from.

" + }, + "ExportDestinationConfiguration":{ + "shape":"ExportDestinationConfiguration", + "documentation":"

Details on where to deliver the exported email data.

" + }, + "Filters":{ + "shape":"ArchiveFilters", + "documentation":"

Criteria to filter which emails are included in the export.

" + }, + "FromTimestamp":{ + "shape":"Timestamp", + "documentation":"

The start of the timestamp range to include emails from.

" + }, + "MaxResults":{ + "shape":"ExportMaxResults", + "documentation":"

The maximum number of email items to include in the export.

" + }, + "ToTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end of the timestamp range to include emails from.

" + } + }, + "documentation":"

The request to initiate an export of emails from an archive.

" + }, + "StartArchiveExportResponse":{ + "type":"structure", + "members":{ + "ExportId":{ + "shape":"ExportId", + "documentation":"

The unique identifier for the initiated export job.

" + } + }, + "documentation":"

The response from initiating an archive export.

" + }, + "StartArchiveSearchRequest":{ + "type":"structure", + "required":[ + "ArchiveId", + "FromTimestamp", + "MaxResults", + "ToTimestamp" + ], + "members":{ + "ArchiveId":{ + "shape":"ArchiveId", + "documentation":"

The identifier of the archive to search emails in.

" + }, + "Filters":{ + "shape":"ArchiveFilters", + "documentation":"

Criteria to filter which emails are included in the search results.

" + }, + "FromTimestamp":{ + "shape":"Timestamp", + "documentation":"

The start timestamp of the range to search emails from.

" + }, + "MaxResults":{ + "shape":"SearchMaxResults", + "documentation":"

The maximum number of search results to return.

" + }, + "ToTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end timestamp of the range to search emails from.

" + } + }, + "documentation":"

The request to initiate a search across emails in an archive.

" + }, + "StartArchiveSearchResponse":{ + "type":"structure", + "members":{ + "SearchId":{ + "shape":"SearchId", + "documentation":"

The unique identifier for the initiated search job.

" + } + }, + "documentation":"

The response from initiating an archive search.

" + }, + "StopArchiveExportRequest":{ + "type":"structure", + "required":["ExportId"], + "members":{ + "ExportId":{ + "shape":"ExportId", + "documentation":"

The identifier of the export job to stop.

" + } + }, + "documentation":"

The request to stop an in-progress archive export job.

" + }, + "StopArchiveExportResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The response indicating if the request to stop the export job succeeded.

On success, returns an HTTP 200 status code. On failure, returns an error message.

" + }, + "StopArchiveSearchRequest":{ + "type":"structure", + "required":["SearchId"], + "members":{ + "SearchId":{ + "shape":"SearchId", + "documentation":"

The identifier of the search job to stop.

" + } + }, + "documentation":"

The request to stop an in-progress archive search job.

" + }, + "StopArchiveSearchResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The response indicating if the request to stop the search job succeeded.

On success, returns an HTTP 200 status code. On failure, returns an error message.

" + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringValueList":{ + "type":"list", + "member":{"shape":"String"}, + "max":10, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the key-value tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the key-value tag.

" + } + }, + "documentation":"

A key-value pair (the value is optional), that you can define and assign to Amazon Web Services resources.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]+$", + "sensitive":true + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]*$", + "sensitive":true + }, + "TaggableResourceArn":{ + "type":"string", + "max":1011, + "min":20, + "pattern":"^arn:aws(|-cn|-us-gov):ses:[a-z0-9-]{1,20}:[0-9]{12}:(mailmanager-|addon-).+$" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Occurs when a service's request rate limit is exceeded, resulting in throttling of further requests.

", + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TrafficPolicy":{ + "type":"structure", + "required":[ + "DefaultAction", + "TrafficPolicyId", + "TrafficPolicyName" + ], + "members":{ + "DefaultAction":{ + "shape":"AcceptAction", + "documentation":"

Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy resource.

" + }, + "TrafficPolicyName":{ + "shape":"TrafficPolicyName", + "documentation":"

A user-friendly name of the traffic policy resource.

" + } + }, + "documentation":"

The structure of a traffic policy resource which is a container for policy statements.

" + }, + "TrafficPolicyArn":{"type":"string"}, + "TrafficPolicyId":{ + "type":"string", + "max":100, + "min":1 + }, + "TrafficPolicyList":{ + "type":"list", + "member":{"shape":"TrafficPolicy"} + }, + "TrafficPolicyName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[A-Za-z0-9_\\-]+$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to untag.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the key-value pairs for the tag or tags you want to remove from the specified resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateArchiveRequest":{ + "type":"structure", + "required":["ArchiveId"], + "members":{ + "ArchiveId":{ + "shape":"ArchiveIdString", + "documentation":"

The identifier of the archive to update.

" + }, + "ArchiveName":{ + "shape":"ArchiveNameString", + "documentation":"

A new, unique name for the archive.

" + }, + "Retention":{ + "shape":"ArchiveRetention", + "documentation":"

A new retention period for emails in the archive.

" + } + }, + "documentation":"

The request to update properties of an existing email archive.

" + }, + "UpdateArchiveResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The response indicating if the archive update succeeded or failed.

On success, returns an HTTP 200 status code. On failure, returns an error message.

" + }, + "UpdateIngressPointRequest":{ + "type":"structure", + "required":["IngressPointId"], + "members":{ + "IngressPointConfiguration":{ + "shape":"IngressPointConfiguration", + "documentation":"

If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret ARN.

" + }, + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The identifier for the ingress endpoint you want to update.

" + }, + "IngressPointName":{ + "shape":"IngressPointName", + "documentation":"

A user-friendly name for the ingress endpoint resource.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of an existing rule set that you attach to an ingress endpoint resource.

" + }, + "StatusToUpdate":{ + "shape":"IngressPointStatusToUpdate", + "documentation":"

The update status of an ingress endpoint.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of an existing traffic policy that you attach to an ingress endpoint resource.

" + } + } + }, + "UpdateIngressPointResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateRelayRequest":{ + "type":"structure", + "required":["RelayId"], + "members":{ + "Authentication":{ + "shape":"RelayAuthentication", + "documentation":"

Authentication for the relay destination server—specify the secretARN where the SMTP credentials are stored.

" + }, + "RelayId":{ + "shape":"RelayId", + "documentation":"

The unique relay identifier.

" + }, + "RelayName":{ + "shape":"RelayName", + "documentation":"

The name of the relay resource.

" + }, + "ServerName":{ + "shape":"RelayServerName", + "documentation":"

The destination relay server address.

" + }, + "ServerPort":{ + "shape":"RelayServerPort", + "documentation":"

The destination relay server port.

" + } + } + }, + "UpdateRelayResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateRuleSetRequest":{ + "type":"structure", + "required":["RuleSetId"], + "members":{ + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The identifier of a rule set you want to update.

" + }, + "RuleSetName":{ + "shape":"RuleSetName", + "documentation":"

A user-friendly name for the rule set resource.

" + }, + "Rules":{ + "shape":"Rules", + "documentation":"

A new set of rules to replace the current rules of the rule set—these rules will override all the rules of the rule set.

" + } + } + }, + "UpdateRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateTrafficPolicyRequest":{ + "type":"structure", + "required":["TrafficPolicyId"], + "members":{ + "DefaultAction":{ + "shape":"AcceptAction", + "documentation":"

Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements

" + }, + "MaxMessageSizeBytes":{ + "shape":"MaxMessageSizeBytes", + "documentation":"

The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked.

" + }, + "PolicyStatements":{ + "shape":"PolicyStatementList", + "documentation":"

The list of conditions to be updated for filtering email traffic.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The identifier of the traffic policy that you want to update.

" + }, + "TrafficPolicyName":{ + "shape":"TrafficPolicyName", + "documentation":"

A user-friendly name for the traffic policy resource.

" + } + } + }, + "UpdateTrafficPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request validation has failed. For details, see the accompanying error message.

", + "exception":true + } + }, + "documentation":"

AWS SES Mail Manager API

AWS SES Mail Manager API contains operations and data types that comprise the Mail Manager feature of Amazon Simple Email Service.

Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen your organization's email infrastructure, simplify email workflow management, and streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer Guide.

" +} diff --git a/botocore/data/managedblockchain/2018-09-24/service-2.json b/botocore/data/managedblockchain/2018-09-24/service-2.json index 8f7a755fd9..e179748dbd 100644 --- a/botocore/data/managedblockchain/2018-09-24/service-2.json +++ b/botocore/data/managedblockchain/2018-09-24/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"managedblockchain", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"ManagedBlockchain", "serviceFullName":"Amazon Managed Blockchain", "serviceId":"ManagedBlockchain", @@ -652,7 +653,7 @@ }, "NetworkType":{ "shape":"AccessorNetworkType", - "documentation":"

The blockchain network that the Accessor token is created for.

We recommend using the appropriate networkType value for the blockchain network that you are creating the Accessor token for. You cannot use the value ETHEREUM_MAINNET_AND_GOERLI to specify a networkType for your Accessor token.

The default value of ETHEREUM_MAINNET_AND_GOERLI is only applied:

  • when the CreateAccessor action does not set a networkType.

  • to all existing Accessor tokens that were created before the networkType property was introduced.

" + "documentation":"

The blockchain network that the Accessor token is created for.

  • Use the actual networkType value for the blockchain network that you are creating the Accessor token for.

  • With the shut down of the Ethereum Goerli and Polygon Mumbai Testnet networks the following networkType values are no longer available for selection and use.

    • ETHEREUM_MAINNET_AND_GOERLI

    • ETHEREUM_GOERLI

    • POLYGON_MUMBAI

    However, your existing Accessor tokens with these networkType values will remain unchanged.

" } } }, @@ -790,7 +791,7 @@ }, "NetworkId":{ "shape":"ResourceIdString", - "documentation":"

The unique identifier of the network for the node.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet

  • n-ethereum-goerli

", + "documentation":"

The unique identifier of the network for the node.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet

", "location":"uri", "locationName":"networkId" }, @@ -916,7 +917,7 @@ "members":{ "NetworkId":{ "shape":"ResourceIdString", - "documentation":"

The unique identifier of the network that the node is on.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet

  • n-ethereum-goerli

", + "documentation":"

The unique identifier of the network that the node is on.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet

", "location":"uri", "locationName":"networkId" }, @@ -1818,7 +1819,7 @@ "members":{ "ChainId":{ "shape":"String", - "documentation":"

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

  • mainnet = 1

  • goerli = 5

" + "documentation":"

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

  • mainnet = 1

" } }, "documentation":"

Attributes of Ethereum for a network.

" diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index 3bf4acb776..73591931d8 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -8,7 +8,10 @@ "protocol": "rest-json", "jsonVersion": "1.1", "uid": "mediaconnect-2018-11-14", - "signatureVersion": "v4" + "signatureVersion": "v4", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AddBridgeOutputs": { @@ -2641,7 +2644,7 @@ "MinLatency": { "shape": "__integer", "locationName": "minLatency", - "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency." }, "Name": { "shape": "__string", @@ -2682,6 +2685,11 @@ "shape": "VpcInterfaceAttachment", "locationName": "vpcInterfaceAttachment", "documentation": "The name of the VPC interface attachment to use for this output." + }, + "OutputStatus": { + "shape": "OutputStatus", + "locationName": "outputStatus", + "documentation": "An indication of whether the new output should be enabled or disabled as soon as it is created. If you don't specify the outputStatus field in your request, MediaConnect sets it to ENABLED." 
} }, "documentation": "The output that you want to add to this flow.", @@ -3904,7 +3912,7 @@ "ScanMode": { "shape": "ScanMode", "locationName": "scanMode", - "documentation": "The type of compression that was used to smooth the video\u2019s appearance" + "documentation": "The type of compression that was used to smooth the video’s appearance" }, "Tcs": { "shape": "Tcs", @@ -3945,7 +3953,7 @@ "ScanMode": { "shape": "ScanMode", "locationName": "scanMode", - "documentation": "The type of compression that was used to smooth the video\u2019s appearance." + "documentation": "The type of compression that was used to smooth the video’s appearance." }, "Tcs": { "shape": "Tcs", @@ -4159,7 +4167,7 @@ "EntitlementStatus": { "shape": "EntitlementStatus", "locationName": "entitlementStatus", - "documentation": "An indication of whether the new entitlement should be enabled or disabled as soon as it is created. If you don\u2019t specify the entitlementStatus field in your request, MediaConnect sets it to ENABLED." + "documentation": "An indication of whether the new entitlement should be enabled or disabled as soon as it is created. If you don’t specify the entitlementStatus field in your request, MediaConnect sets it to ENABLED." }, "Name": { "shape": "__string", @@ -5237,6 +5245,11 @@ "shape": "__listOf__integer", "locationName": "bridgePorts", "documentation": "The bridge output ports currently in use." + }, + "OutputStatus": { + "shape": "OutputStatus", + "locationName": "outputStatus", + "documentation": "An indication of whether the output is transmitting data or not." } }, "documentation": "The settings for an output.", @@ -5245,6 +5258,13 @@ "Name" ] }, + "OutputStatus": { + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "PriceUnits": { "type": "string", "enum": [ @@ -5801,7 +5821,7 @@ "MinLatency": { "shape": "__integer", "locationName": "minLatency", - "documentation": "The minimum latency in milliseconds for SRT-based streams. 
In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency." }, "Name": { "shape": "__string", @@ -6117,7 +6137,7 @@ "MinLatency": { "shape": "__integer", "locationName": "minLatency", - "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency." }, "Protocol": { "shape": "Protocol", @@ -6629,7 +6649,7 @@ "EntitlementStatus": { "shape": "EntitlementStatus", "locationName": "entitlementStatus", - "documentation": "An indication of whether you want to enable the entitlement to allow access, or disable it to stop streaming content to the subscriber\u2019s flow temporarily. If you don\u2019t specify the entitlementStatus field in your request, MediaConnect leaves the value unchanged." 
+ "documentation": "An indication of whether you want to enable the entitlement to allow access, or disable it to stop streaming content to the subscriber’s flow temporarily. If you don’t specify the entitlementStatus field in your request, MediaConnect leaves the value unchanged." }, "FlowArn": { "shape": "__string", @@ -6771,7 +6791,7 @@ "MinLatency": { "shape": "__integer", "locationName": "minLatency", - "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency." }, "OutputArn": { "shape": "__string", @@ -6819,6 +6839,11 @@ "shape": "VpcInterfaceAttachment", "locationName": "vpcInterfaceAttachment", "documentation": "The name of the VPC interface attachment to use for this output." + }, + "OutputStatus": { + "shape": "OutputStatus", + "locationName": "outputStatus", + "documentation": "An indication of whether the output should transmit data or not. If you don't specify the outputStatus field in your request, MediaConnect leaves the value unchanged." } }, "documentation": "The fields that you want to update in the output.", @@ -6929,7 +6954,7 @@ "MinLatency": { "shape": "__integer", "locationName": "minLatency", - "documentation": "The minimum latency in milliseconds for SRT-based streams. 
In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency." }, "Protocol": { "shape": "Protocol", @@ -7430,4 +7455,4 @@ } }, "documentation": "API for AWS Elemental MediaConnect" -} +} \ No newline at end of file diff --git a/botocore/data/mediaconvert/2017-08-29/paginators-1.json b/botocore/data/mediaconvert/2017-08-29/paginators-1.json index 5588b9e677..edee23dc7b 100644 --- a/botocore/data/mediaconvert/2017-08-29/paginators-1.json +++ b/botocore/data/mediaconvert/2017-08-29/paginators-1.json @@ -29,6 +29,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Queues" + }, + "SearchJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" } } } diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 6651915f97..db3e021b4c 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -9,7 +9,10 @@ "jsonVersion": "1.1", "uid": "mediaconvert-2017-08-29", "signatureVersion": "v4", - "serviceAbbreviation": "MediaConvert" + "serviceAbbreviation": "MediaConvert", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AssociateCertificate": { @@ -461,7 +464,7 @@ "documentation": "The service couldn't complete your request because there is a conflict with 
the current state of the resource." } ], - "documentation": "Send an request with an empty body to the regional API endpoint to get your account API endpoint.", + "documentation": "Send a request with an empty body to the regional API endpoint to get your account API endpoint. Note that DescribeEndpoints is no longer required. We recommend that you send your requests directly to the regional endpoint instead.", "deprecated": true, "deprecatedMessage": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." }, @@ -957,6 +960,47 @@ ], "documentation": "Create or change your policy. For more information about policies, see the user guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" }, + "SearchJobs": { + "name": "SearchJobs", + "http": { + "method": "GET", + "requestUri": "/2017-08-29/search", + "responseCode": 200 + }, + "input": { + "shape": "SearchJobsRequest" + }, + "output": { + "shape": "SearchJobsResponse" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Please check your request form and syntax." + }, + { + "shape": "InternalServerErrorException", + "documentation": "The service encountered an unexpected condition and can't fulfill your request." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials you sent." + }, + { + "shape": "NotFoundException", + "documentation": "The resource you requested doesn't exist." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." 
+ }, + { + "shape": "ConflictException", + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Retrieve a JSON array that includes job details for up to twenty of your most recent jobs. Optionally filter results further according to input file, queue, or status. To retrieve the twenty next most recent jobs, use the nextToken string returned with the array." + }, "TagResource": { "name": "TagResource", "http": { @@ -3041,6 +3085,11 @@ "locationName": "codecSpecification", "documentation": "Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist generation." }, + "DashIFrameTrickPlayNameModifier": { + "shape": "__stringMin1Max256", + "locationName": "dashIFrameTrickPlayNameModifier", + "documentation": "Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 character long. This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank." + }, "DashManifestStyle": { "shape": "DashManifestStyle", "locationName": "dashManifestStyle", @@ -4029,6 +4078,11 @@ "locationName": "baseUrl", "documentation": "A partial URI prefix that will be put in the manifest (.mpd) file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file." 
}, + "DashIFrameTrickPlayNameModifier": { + "shape": "__stringMin1Max256", + "locationName": "dashIFrameTrickPlayNameModifier", + "documentation": "Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 character long. This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank." + }, "DashManifestStyle": { "shape": "DashManifestStyle", "locationName": "dashManifestStyle", @@ -7567,12 +7621,12 @@ "type": "structure", "members": { "EndTimecode": { - "shape": "__stringPattern010920405090509092", + "shape": "__stringPattern010920405090509092090909", "locationName": "endTimecode", "documentation": "Set End timecode to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." 
}, "StartTimecode": { - "shape": "__stringPattern010920405090509092", + "shape": "__stringPattern010920405090509092090909", "locationName": "startTimecode", "documentation": "Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." } @@ -7797,10 +7851,30 @@ "InputVideoGenerator": { "type": "structure", "members": { + "Channels": { + "shape": "__integerMin1Max32", + "locationName": "channels", + "documentation": "Specify the number of audio channels to include in your video generator input. MediaConvert creates these audio channels as silent audio within a single audio track. Enter an integer from 1 to 32." + }, "Duration": { "shape": "__integerMin50Max86400000", "locationName": "duration", - "documentation": "Specify an integer value for Black video duration from 50 to 86400000 to generate a black video input for that many milliseconds. Required when you include Video generator." + "documentation": "Specify the duration, in milliseconds, for your video generator input.\nEnter an integer from 50 to 86400000." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max1001", + "locationName": "framerateDenominator", + "documentation": "Specify the denominator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate numerator. MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank." 
+ }, + "FramerateNumerator": { + "shape": "__integerMin1Max60000", + "locationName": "framerateNumerator", + "documentation": "Specify the numerator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate denominator. MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank." + }, + "SampleRate": { + "shape": "__integerMin32000Max48000", + "locationName": "sampleRate", + "documentation": "Specify the audio sample rate, in Hz, for the silent audio in your video generator input.\nEnter an integer from 32000 to 48000." } }, "documentation": "When you include Video generator, MediaConvert creates a video input with black frames. Use this setting if you do not have a video input or if you want to add black video frames before, or after, other inputs. You can specify Video generator, or you can specify an Input file, but you cannot specify both. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-generator.html" @@ -10547,7 +10621,7 @@ "documentation": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" }, "NameModifier": { - "shape": "__stringMin1", + "shape": "__stringMin1Max256", "locationName": "nameModifier", "documentation": "Use Name modifier to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier is required. Name modifier also accepts format identifiers. 
For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." }, @@ -11423,6 +11497,62 @@ }, "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." }, + "SearchJobsRequest": { + "type": "structure", + "members": { + "InputFile": { + "shape": "__string", + "locationName": "inputFile", + "documentation": "Optional. Provide your input file URL or your partial input file name. The maximum length for an input file is 300 characters.", + "location": "querystring" + }, + "MaxResults": { + "shape": "__integerMin1Max20", + "locationName": "maxResults", + "documentation": "Optional. Number of jobs, up to twenty, that will be returned at one time.", + "location": "querystring" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Optional. Use this string, provided with the response to a previous request, to request the next batch of jobs.", + "location": "querystring" + }, + "Order": { + "shape": "Order", + "locationName": "order", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "location": "querystring" + }, + "Queue": { + "shape": "__string", + "locationName": "queue", + "documentation": "Optional. Provide a queue name, or a queue ARN, to return only jobs from that queue.", + "location": "querystring" + }, + "Status": { + "shape": "JobStatus", + "locationName": "status", + "documentation": "Optional. 
A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.", + "location": "querystring" + } + } + }, + "SearchJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "__listOfJob", + "locationName": "jobs", + "documentation": "List of jobs." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Use this string to request the next batch of jobs." + } + } + }, "SimulateReservedQueue": { "type": "string", "documentation": "Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default.", @@ -14276,6 +14406,10 @@ "type": "string", "pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}$" }, + "__stringPattern010920405090509092090909": { + "type": "string", + "pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}(@[0-9]+(\\.[0-9]+)?(:[0-9]+)?)?$" + }, "__stringPattern01D20305D205D": { "type": "string", "pattern": "^((([0-1]\\d)|(2[0-3]))(:[0-5]\\d){2}([:;][0-5]\\d))$" diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index 7f9f2da0d5..f5f1bfee64 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -9,7 +9,10 @@ "uid": "medialive-2017-10-14", "signatureVersion": "v4", "serviceAbbreviation": "MediaLive", - "jsonVersion": "1.1" + "jsonVersion": "1.1", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AcceptInputDeviceTransfer": { @@ -855,7 +858,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to delete doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to delete doesn’t exist. Check the ID and try again." 
}, { "shape": "GatewayTimeoutException", @@ -905,7 +908,7 @@ }, { "shape": "NotFoundException", - "documentation": "The program that you are trying to delete doesn\u2019t exist. Check the ID and try again." + "documentation": "The program that you are trying to delete doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -1353,7 +1356,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to describe doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to describe doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -1399,7 +1402,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't describe the program. The multiplex or the program that you specified doesn\u2019t exist. Check the IDs and try again." + "documentation": "MediaLive can't describe the program. The multiplex or the program that you specified doesn’t exist. Check the IDs and try again." }, { "shape": "GatewayTimeoutException", @@ -1847,7 +1850,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't provide the list of programs. The multiplex that you specified doesn\u2019t exist. Check the ID and try again." + "documentation": "MediaLive can't provide the list of programs. The multiplex that you specified doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -2118,7 +2121,7 @@ "documentation": "Request limit exceeded on reboot device calls to the input device service." } ], - "documentation": "Send a reboot command to the specified input device. The device will begin rebooting within a few seconds of sending the command. When the reboot is complete, the device\u2019s connection status will change to connected." + "documentation": "Send a reboot command to the specified input device. The device will begin rebooting within a few seconds of sending the command. 
When the reboot is complete, the device’s connection status will change to connected." }, "RejectInputDeviceTransfer": { "name": "RejectInputDeviceTransfer", @@ -2357,7 +2360,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to start doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to start doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -2507,7 +2510,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to stop doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to stop doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -2903,7 +2906,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to update doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to update doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -2953,7 +2956,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't update the program. The multiplex or the program that you specified doesn\u2019t exist. Check the IDs and try again." + "documentation": "MediaLive can't update the program. The multiplex or the program that you specified doesn’t exist. Check the IDs and try again." }, { "shape": "GatewayTimeoutException", @@ -4681,7 +4684,7 @@ "ProgramSelection": { "shape": "DolbyEProgramSelection", "locationName": "programSelection", - "documentation": "Applies only to Dolby E. Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. 
\u201cAll channels\u201d means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect." + "documentation": "Applies only to Dolby E. Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. “All channels” means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect." } }, "documentation": "Audio Dolby EDecode", @@ -6447,6 +6450,11 @@ "Vpc": { "shape": "InputVpcRequest", "locationName": "vpc" + }, + "SrtSettings": { + "shape": "SrtSettingsRequest", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "Placeholder documentation for CreateInput" @@ -6507,6 +6515,11 @@ "Vpc": { "shape": "InputVpcRequest", "locationName": "vpc" + }, + "SrtSettings": { + "shape": "SrtSettingsRequest", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "The name of the input" @@ -7651,6 +7664,11 @@ "Type": { "shape": "InputType", "locationName": "type" + }, + "SrtSettings": { + "shape": "SrtSettings", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "Placeholder documentation for DescribeInputResponse" @@ -8439,7 +8457,7 @@ "Bitrate": { "shape": "__double", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode.\n// * @affectsRightSizing true" + "documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode." 
}, "CodingMode": { "shape": "Eac3AtmosCodingMode", @@ -8724,7 +8742,7 @@ "FontFamily": { "shape": "__string", "locationName": "fontFamily", - "documentation": "Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to \"monospaced\". (If styleControl is set to exclude, the font family is always set to \"monospaced\".)\n\nYou specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size.\n\n- Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as \u201cArial\u201d), or a generic font family (such as \u201cserif\u201d), or \u201cdefault\u201d (to let the downstream player choose the font).\n- Leave blank to set the family to \u201cmonospace\u201d." + "documentation": "Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to \"monospaced\". (If styleControl is set to exclude, the font family is always set to \"monospaced\".)\n\nYou specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size.\n\n- Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font).\n- Leave blank to set the family to “monospace”." 
}, "StyleControl": { "shape": "EbuTtDDestinationStyleControl", @@ -8900,7 +8918,7 @@ "JamSyncTime": { "shape": "__string", "locationName": "jamSyncTime", - "documentation": "Optional. Enter a time for the jam sync. The default is midnight UTC. When epoch locking is enabled, MediaLive performs a daily jam sync on every output encode to ensure timecodes don\u2019t diverge from the wall clock. The jam sync applies only to encodes with frame rate of 29.97 or 59.94 FPS. To override, enter a time in HH:MM:SS in UTC. Always set the MM:SS portion to 00:00." + "documentation": "Optional. Enter a time for the jam sync. The default is midnight UTC. When epoch locking is enabled, MediaLive performs a daily jam sync on every output encode to ensure timecodes don’t diverge from the wall clock. The jam sync applies only to encodes with frame rate of 29.97 or 59.94 FPS. To override, enter a time in HH:MM:SS in UTC. Always set the MM:SS portion to 00:00." } }, "documentation": "Epoch Locking Settings" @@ -9558,7 +9576,7 @@ "FilterSettings": { "shape": "H264FilterSettings", "locationName": "filterSettings", - "documentation": "Optional filters that you can apply to an encode." + "documentation": "Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We\nrecommend that you try both filters and observe the results to decide which one to use.\n\nThe Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual\nfiltering and motion compensated temporal filtering (MCTF). It operates independently of the compression level.\n\nThe Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current\ncompression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower." 
}, "FixedAfd": { "shape": "FixedAfd", @@ -9952,7 +9970,7 @@ "FilterSettings": { "shape": "H265FilterSettings", "locationName": "filterSettings", - "documentation": "Optional filters that you can apply to an encode." + "documentation": "Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We\nrecommend that you try both filters and observe the results to decide which one to use.\n\nThe Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual\nfiltering and motion compensated temporal filtering (MCTF). It operates independently of the compression level.\n\nThe Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current\ncompression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower." }, "FixedAfd": { "shape": "FixedAfd", @@ -10971,6 +10989,11 @@ "Type": { "shape": "InputType", "locationName": "type" + }, + "SrtSettings": { + "shape": "SrtSettings", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "Placeholder documentation for Input" @@ -12079,7 +12102,8 @@ "MEDIACONNECT", "INPUT_DEVICE", "AWS_CDI", - "TS_FILE" + "TS_FILE", + "SRT_CALLER" ] }, "InputVpcRequest": { @@ -14061,6 +14085,22 @@ "VideoPid": { "shape": "__integer", "locationName": "videoPid" + }, + "AribCaptionsPid": { + "shape": "__integer", + "locationName": "aribCaptionsPid" + }, + "DvbTeletextPids": { + "shape": "__listOf__integer", + "locationName": "dvbTeletextPids" + }, + "EcmPid": { + "shape": "__integer", + "locationName": "ecmPid" + }, + "Smpte2038Pid": { + "shape": "__integer", + "locationName": "smpte2038Pid" } }, "documentation": "Packet identifiers map for a given Multiplex program." 
@@ -17426,6 +17466,11 @@ "shape": "__listOfInputSourceRequest", "locationName": "sources", "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." + }, + "SrtSettings": { + "shape": "SrtSettingsRequest", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "Placeholder documentation for UpdateInput" @@ -17620,6 +17665,11 @@ "shape": "__listOfInputSourceRequest", "locationName": "sources", "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." + }, + "SrtSettings": { + "shape": "SrtSettingsRequest", + "locationName": "srtSettings", + "documentation": "The settings associated with an SRT input." } }, "documentation": "A request to update an input.", @@ -17704,6 +17754,10 @@ "shape": "__string", "locationName": "name", "documentation": "Name of the multiplex." + }, + "PacketIdentifiersMapping": { + "shape": "MultiplexPacketIdentifiersMapping", + "locationName": "packetIdentifiersMapping" } }, "documentation": "Placeholder documentation for UpdateMultiplex" @@ -17786,6 +17840,10 @@ "shape": "__string", "locationName": "name", "documentation": "Name of the multiplex." 
+ }, + "PacketIdentifiersMapping": { + "shape": "MultiplexPacketIdentifiersMapping", + "locationName": "packetIdentifiersMapping" } }, "documentation": "A request to update a multiplex.", @@ -23700,7 +23758,152 @@ "ALL_OUTPUT_GROUPS", "SCTE35_ENABLED_OUTPUT_GROUPS" ] + }, + "Algorithm": { + "type": "string", + "enum": [ + "AES128", + "AES192", + "AES256" + ], + "documentation": "Placeholder documentation for Algorithm" + }, + "SrtCallerDecryption": { + "type": "structure", + "members": { + "Algorithm": { + "shape": "Algorithm", + "locationName": "algorithm", + "documentation": "The algorithm used to encrypt content." + }, + "PassphraseSecretArn": { + "shape": "__string", + "locationName": "passphraseSecretArn", + "documentation": "The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. The secret holds the passphrase that MediaLive uses to decrypt the source content." + } + }, + "documentation": "The decryption settings for the SRT caller source. Present only if the source has decryption enabled." + }, + "SrtCallerDecryptionRequest": { + "type": "structure", + "members": { + "Algorithm": { + "shape": "Algorithm", + "locationName": "algorithm", + "documentation": "The algorithm used to encrypt content." + }, + "PassphraseSecretArn": { + "shape": "__string", + "locationName": "passphraseSecretArn", + "documentation": "The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. This secret holds the passphrase that MediaLive will use to decrypt the source content." + } + }, + "documentation": "Complete these parameters only if the content is encrypted." 
+ }, + "SrtCallerSource": { + "type": "structure", + "members": { + "Decryption": { + "shape": "SrtCallerDecryption", + "locationName": "decryption" + }, + "MinimumLatency": { + "shape": "__integer", + "locationName": "minimumLatency", + "documentation": "The preferred latency (in milliseconds) for implementing packet loss and recovery. Packet recovery is a key feature of SRT." + }, + "SrtListenerAddress": { + "shape": "__string", + "locationName": "srtListenerAddress", + "documentation": "The IP address at the upstream system (the listener) that MediaLive (the caller) connects to." + }, + "SrtListenerPort": { + "shape": "__string", + "locationName": "srtListenerPort", + "documentation": "The port at the upstream system (the listener) that MediaLive (the caller) connects to." + }, + "StreamId": { + "shape": "__string", + "locationName": "streamId", + "documentation": "The stream ID, if the upstream system uses this identifier." + } + }, + "documentation": "The configuration for a source that uses SRT as the connection protocol. In terms of establishing the connection, MediaLive is always caller and the upstream system is always the listener. In terms of transmission of the source content, MediaLive is always the receiver and the upstream system is always the sender." + }, + "SrtCallerSourceRequest": { + "type": "structure", + "members": { + "Decryption": { + "shape": "SrtCallerDecryptionRequest", + "locationName": "decryption" + }, + "MinimumLatency": { + "shape": "__integer", + "locationName": "minimumLatency", + "documentation": "The preferred latency (in milliseconds) for implementing packet loss and recovery. Packet recovery is a key feature of SRT. Obtain this value from the operator at the upstream system." + }, + "SrtListenerAddress": { + "shape": "__string", + "locationName": "srtListenerAddress", + "documentation": "The IP address at the upstream system (the listener) that MediaLive (the caller) will connect to." 
+ }, + "SrtListenerPort": { + "shape": "__string", + "locationName": "srtListenerPort", + "documentation": "The port at the upstream system (the listener) that MediaLive (the caller) will connect to." + }, + "StreamId": { + "shape": "__string", + "locationName": "streamId", + "documentation": "This value is required if the upstream system uses this identifier because without it, the SRT handshake between MediaLive (the caller) and the upstream system (the listener) might fail." + } + }, + "documentation": "Configures the connection for a source that uses SRT as the connection protocol. In terms of establishing the connection, MediaLive is always the caller and the upstream system is always the listener. In terms of transmission of the source content, MediaLive is always the receiver and the upstream system is always the sender." + }, + "SrtSettings": { + "type": "structure", + "members": { + "SrtCallerSources": { + "shape": "__listOfSrtCallerSource", + "locationName": "srtCallerSources" + } + }, + "documentation": "The configured sources for this SRT input." + }, + "SrtSettingsRequest": { + "type": "structure", + "members": { + "SrtCallerSources": { + "shape": "__listOfSrtCallerSourceRequest", + "locationName": "srtCallerSources" + } + }, + "documentation": "Configures the sources for this SRT input. For a single-pipeline input, include one srtCallerSource in the array. For a standard-pipeline input, include two srtCallerSource." 
+ }, + "__listOfSrtCallerSource": { + "type": "list", + "member": { + "shape": "SrtCallerSource" + }, + "documentation": "Placeholder documentation for __listOfSrtCallerSource" + }, + "__listOfSrtCallerSourceRequest": { + "type": "list", + "member": { + "shape": "SrtCallerSourceRequest" + }, + "documentation": "Placeholder documentation for __listOfSrtCallerSourceRequest" + }, + "MultiplexPacketIdentifiersMapping": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "MultiplexProgramPacketIdentifiersMap" + }, + "documentation": "Placeholder documentation for MultiplexPacketIdentifiersMapping" } }, "documentation": "API for AWS Elemental MediaLive" -} +} \ No newline at end of file diff --git a/botocore/data/mediapackagev2/2022-12-25/service-2.json b/botocore/data/mediapackagev2/2022-12-25/service-2.json index 9b5efe8615..09aab1a184 100644 --- a/botocore/data/mediapackagev2/2022-12-25/service-2.json +++ b/botocore/data/mediapackagev2/2022-12-25/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2022-12-25", "endpointPrefix":"mediapackagev2", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"mediapackagev2", "serviceFullName":"AWS Elemental MediaPackage v2", "serviceId":"MediaPackageV2", @@ -557,6 +557,10 @@ "Description":{ "shape":"ResourceDescription", "documentation":"

Any descriptive information that you want to add to the channel for future identification purposes.

" + }, + "InputType":{ + "shape":"InputType", + "documentation":"

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).

" } }, "documentation":"

The configuration of the channel.

" @@ -694,6 +698,10 @@ "location":"header", "locationName":"x-amzn-client-token" }, + "InputType":{ + "shape":"InputType", + "documentation":"

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).

" + }, "Description":{ "shape":"ResourceDescription", "documentation":"

Enter any descriptive text that helps you to identify the channel.

" @@ -740,6 +748,10 @@ "documentation":"

The description for your channel.

" }, "IngestEndpoints":{"shape":"IngestEndpointList"}, + "InputType":{ + "shape":"InputType", + "documentation":"

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -965,6 +977,10 @@ "shape":"CreateDashManifests", "documentation":"

A DASH manifest configuration.

" }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

A comma-separated list of tag key:value pairs that you define. For example:

\"Key1\": \"Value1\",

\"Key2\": \"Value2\"

" @@ -1042,6 +1058,10 @@ "shape":"GetDashManifests", "documentation":"

A DASH manifest configuration.

" }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -1248,7 +1268,8 @@ "CLEAR_KEY_AES_128", "FAIRPLAY", "PLAYREADY", - "WIDEVINE" + "WIDEVINE", + "IRDETO" ] }, "Encryption":{ @@ -1321,6 +1342,19 @@ }, "documentation":"

The encryption type.

" }, + "EndpointErrorCondition":{ + "type":"string", + "enum":[ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, + "EndpointErrorConditions":{ + "type":"list", + "member":{"shape":"EndpointErrorCondition"} + }, "EntityTag":{ "type":"string", "max":256, @@ -1360,6 +1394,16 @@ "max":1209600, "min":0 }, + "ForceEndpointErrorConfiguration":{ + "type":"structure", + "members":{ + "EndpointErrorConditions":{ + "shape":"EndpointErrorConditions", + "documentation":"

The failover conditions for the endpoint. The options are:

  • STALE_MANIFEST - The manifest stalled and there are no new segments or parts.

  • INCOMPLETE_MANIFEST - There is a gap in the manifest.

  • MISSING_DRM_KEY - Key rotation is enabled but we're unable to fetch the key for the current key period.

  • SLATE_INPUT - The segments which contain slate content are considered to be missing content.

" + } + }, + "documentation":"

The failover settings for the endpoint.

" + }, "GetChannelGroupRequest":{ "type":"structure", "required":["ChannelGroupName"], @@ -1516,6 +1560,10 @@ "documentation":"

The description for your channel.

" }, "IngestEndpoints":{"shape":"IngestEndpointList"}, + "InputType":{ + "shape":"InputType", + "documentation":"

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -1798,6 +1846,14 @@ "shape":"GetLowLatencyHlsManifests", "documentation":"

A low-latency HLS manifest configuration.

" }, + "DashManifests":{ + "shape":"GetDashManifests", + "documentation":"

A DASH manifest configuration.

" + }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -1805,10 +1861,6 @@ "Tags":{ "shape":"TagMap", "documentation":"

The comma-separated list of tag key:value pairs assigned to the origin endpoint.

" - }, - "DashManifests":{ - "shape":"GetDashManifests", - "documentation":"

A DASH manifest configuration.

" } } }, @@ -1837,6 +1889,13 @@ "member":{"shape":"IngestEndpoint"}, "documentation":"

The list of ingest endpoints.

" }, + "InputType":{ + "type":"string", + "enum":[ + "HLS", + "CMAF" + ] + }, "Integer":{ "type":"integer", "box":true @@ -2116,6 +2175,10 @@ "DashManifests":{ "shape":"ListDashManifests", "documentation":"

A DASH manifest configuration.

" + }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" } }, "documentation":"

The configuration of the origin endpoint.

" @@ -2625,6 +2688,10 @@ "documentation":"

The description for your channel.

" }, "IngestEndpoints":{"shape":"IngestEndpointList"}, + "InputType":{ + "shape":"InputType", + "documentation":"

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -2691,6 +2758,10 @@ "shape":"CreateDashManifests", "documentation":"

A DASH manifest configuration.

" }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The expected current Entity Tag (ETag) for the resource. If the specified ETag does not match the resource's current entity tag, the update request will be rejected.

", @@ -2766,6 +2837,10 @@ "shape":"GetLowLatencyHlsManifests", "documentation":"

A low-latency HLS manifest configuration.

" }, + "ForceEndpointErrorConfiguration":{ + "shape":"ForceEndpointErrorConfiguration", + "documentation":"

The failover settings for the endpoint.

" + }, "ETag":{ "shape":"EntityTag", "documentation":"

The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource.

" @@ -2840,7 +2915,9 @@ "TIMING_SOURCE_MISSING", "UPDATE_PERIOD_SMALLER_THAN_SEGMENT_DURATION", "PERIOD_TRIGGERS_NONE_SPECIFIED_WITH_ADDITIONAL_VALUES", - "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS" + "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS", + "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION", + "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" ] } }, diff --git a/botocore/data/medical-imaging/2023-07-19/service-2.json b/botocore/data/medical-imaging/2023-07-19/service-2.json index 127e1cafd7..4f4168da52 100644 --- a/botocore/data/medical-imaging/2023-07-19/service-2.json +++ b/botocore/data/medical-imaging/2023-07-19/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-07-19", + "auth":["aws.auth#sigv4"], "endpointPrefix":"medical-imaging", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Health Imaging", "serviceId":"Medical Imaging", "signatureVersion":"v4", @@ -282,7 +283,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Search image sets based on defined input attributes.

SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All range queries must be input as (lowerBound, upperBound).

SearchImageSets uses the updatedAt field for sorting in decreasing order from latest to oldest.

", + "documentation":"

Search image sets based on defined input attributes.

SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All date range queries must be input as (lowerBound, upperBound).

By default, SearchImageSets uses the updatedAt field for sorting in descending order from newest to oldest.

", "endpoint":{"hostPrefix":"runtime-"} }, "StartDICOMImportJob":{ @@ -384,6 +385,16 @@ "type":"string", "pattern":"arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:medical-imaging:[a-z0-9-]+:[0-9]{12}:datastore/[0-9a-z]{32}(/imageset/[0-9a-z]{32})?" }, + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"\\d+" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, "ClientToken":{ "type":"string", "max":64, @@ -403,6 +414,12 @@ }, "exception":true }, + "CopiableAttributes":{ + "type":"string", + "max":260000, + "min":1, + "sensitive":true + }, "CopyDestinationImageSet":{ "type":"structure", "required":[ @@ -497,6 +514,12 @@ "copyImageSetInformation":{ "shape":"CopyImageSetInformation", "documentation":"

Copy image set information.

" + }, + "force":{ + "shape":"Boolean", + "documentation":"

Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level metadata are mismatched across the sourceImageSet and destinationImageSet.

", + "location":"querystring", + "locationName":"force" } }, "payload":"copyImageSetInformation" @@ -530,6 +553,10 @@ "latestVersionId":{ "shape":"ImageSetExternalVersionId", "documentation":"

The latest version identifier for the source image set.

" + }, + "DICOMCopies":{ + "shape":"MetadataCopies", + "documentation":"

Contains MetadataCopies structure and wraps information related to specific copy use cases. For example, when copying subsets.

" } }, "documentation":"

Copy source image set information.

" @@ -614,7 +641,7 @@ }, "DICOMAccessionNumber":{ "type":"string", - "max":16, + "max":256, "min":0, "sensitive":true }, @@ -734,7 +761,7 @@ }, "DICOMNumberOfStudyRelatedSeries":{ "type":"integer", - "max":10000, + "max":1000000, "min":0 }, "DICOMPatientBirthDate":{ @@ -745,7 +772,7 @@ }, "DICOMPatientId":{ "type":"string", - "max":64, + "max":256, "min":0, "sensitive":true }, @@ -769,9 +796,9 @@ }, "DICOMSeriesInstanceUID":{ "type":"string", - "max":64, + "max":256, "min":0, - "pattern":"(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*", + "pattern":"(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*", "sensitive":true }, "DICOMSeriesModality":{ @@ -822,9 +849,9 @@ }, "DICOMStudyInstanceUID":{ "type":"string", - "max":64, + "max":256, "min":0, - "pattern":"(?:[1-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*", + "pattern":"(?:[0-9][0-9]*|0)(\\.(?:[1-9][0-9]*|0))*", "sensitive":true }, "DICOMStudyTime":{ @@ -1315,6 +1342,10 @@ "imageSetArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) assigned to the image set.

" + }, + "overrides":{ + "shape":"Overrides", + "documentation":"

This object contains the details of any overrides used while creating a specific image set version. If an image set was copied or updated using the force flag, this object will contain the forced flag.

" } } }, @@ -1384,6 +1415,10 @@ "message":{ "shape":"Message", "documentation":"

The error message thrown if an image set action fails.

" + }, + "overrides":{ + "shape":"Overrides", + "documentation":"

Contains details on overrides used when creating the returned version of an image set. For example, if forced exists, the forced flag was used when creating the image set.

" } }, "documentation":"

The image set properties.

" @@ -1662,12 +1697,27 @@ "min":1, "pattern":"[\\w -:]+" }, + "MetadataCopies":{ + "type":"structure", + "required":["copiableAttributes"], + "members":{ + "copiableAttributes":{ + "shape":"CopiableAttributes", + "documentation":"

The JSON string used to specify a subset of SOP Instances to copy from source to destination image set.

" + } + }, + "documentation":"

Contains copiable Attributes structure and wraps information related to specific copy use cases. For example, when copying subsets.

" + }, "MetadataUpdates":{ "type":"structure", "members":{ "DICOMUpdates":{ "shape":"DICOMUpdates", "documentation":"

The object containing removableAttributes and updatableAttributes.

" + }, + "revertToVersionId":{ + "shape":"ImageSetExternalVersionId", + "documentation":"

Specifies the previous image set version ID to revert the current image set back to.

You must provide either revertToVersionId or DICOMUpdates in your request. A ValidationException error is thrown if both parameters are provided at the same time.

" } }, "documentation":"

Contains DICOMUpdates.

", @@ -1686,6 +1736,16 @@ "BETWEEN" ] }, + "Overrides":{ + "type":"structure", + "members":{ + "forced":{ + "shape":"Boolean", + "documentation":"

Setting this flag will force the CopyImageSet and UpdateImageSetMetadata operations, even if Patient, Study, or Series level metadata are mismatched.

" + } + }, + "documentation":"

Specifies the overrides used in image set modification calls to CopyImageSet and UpdateImageSetMetadata.

" + }, "PayloadBlob":{ "type":"blob", "streaming":true @@ -1934,6 +1994,10 @@ "outputS3Uri":{ "shape":"S3Uri", "documentation":"

The output prefix of the S3 bucket to upload the results of the DICOM import job.

" + }, + "inputOwnerAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The account ID of the source S3 bucket owner.

" } } }, @@ -2077,6 +2141,12 @@ "location":"querystring", "locationName":"latestVersion" }, + "force":{ + "shape":"Boolean", + "documentation":"

Setting this flag will force the UpdateImageSetMetadata operation for the following attributes:

  • Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, and Tag.StudyID

  • Adding, removing, or updating private tags for an individual SOP Instance

", + "location":"querystring", + "locationName":"force" + }, "updateImageSetMetadataUpdates":{ "shape":"MetadataUpdates", "documentation":"

Update image set metadata updates.

" diff --git a/botocore/data/memorydb/2021-01-01/endpoint-rule-set-1.json b/botocore/data/memorydb/2021-01-01/endpoint-rule-set-1.json index 5f23b68af4..9bf98ba089 100644 --- a/botocore/data/memorydb/2021-01-01/endpoint-rule-set-1.json +++ b/botocore/data/memorydb/2021-01-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -328,9 +326,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + 
"type": "tree" }, { "conditions": [], diff --git a/botocore/data/memorydb/2021-01-01/service-2.json b/botocore/data/memorydb/2021-01-01/service-2.json index 7dfa0a3786..ec1558b0ec 100644 --- a/botocore/data/memorydb/2021-01-01/service-2.json +++ b/botocore/data/memorydb/2021-01-01/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"memory-db", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon MemoryDB", "serviceFullName":"Amazon MemoryDB", "serviceId":"MemoryDB", "signatureVersion":"v4", "signingName":"memorydb", "targetPrefix":"AmazonMemoryDB", - "uid":"memorydb-2021-01-01" + "uid":"memorydb-2021-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchUpdateCluster":{ @@ -202,7 +204,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Deletes a cluster. It also deletes all associated nodes and node endpoints

" + "documentation":"

Deletes a cluster. It also deletes all associated nodes and node endpoints

CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception.

" }, "DeleteParameterGroup":{ "name":"DeleteParameterGroup", @@ -311,7 +313,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Returns a list of the available Redis engine versions.

" + "documentation":"

Returns a list of the available Redis OSS engine versions.

" }, "DescribeEvents":{ "name":"DescribeEvents", @@ -918,11 +920,11 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The Redis engine version used by the cluster

" + "documentation":"

The Redis OSS engine version used by the cluster

" }, "EnginePatchVersion":{ "shape":"String", - "documentation":"

The Redis engine patch version used by the cluster

" + "documentation":"

The Redis OSS engine patch version used by the cluster

" }, "ParameterGroupName":{ "shape":"String", @@ -1011,7 +1013,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The Redis engine version used by the cluster

" + "documentation":"

The Redis OSS engine version used by the cluster

" }, "MaintenanceWindow":{ "shape":"String", @@ -1248,7 +1250,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the Redis engine to be used for the cluster.

" + "documentation":"

The version number of the Redis OSS engine to be used for the cluster.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -1607,7 +1609,7 @@ "members":{ "EngineVersion":{ "shape":"String", - "documentation":"

The Redis engine version

" + "documentation":"

The Redis OSS engine version

" }, "ParameterGroupFamily":{ "shape":"String", @@ -2017,7 +2019,7 @@ "documentation":"

Specifies the name of the parameter group family to which the engine default parameters apply.

" } }, - "documentation":"

Provides details of the Redis engine version

" + "documentation":"

Provides details of the Redis OSS engine version

" }, "EngineVersionInfoList":{ "type":"list", @@ -3422,5 +3424,5 @@ "exception":true } }, - "documentation":"

MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands.

" + "documentation":"

MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands.

" } diff --git a/botocore/data/mobile/2017-07-01/endpoint-rule-set-1.json b/botocore/data/mobile/2017-07-01/endpoint-rule-set-1.json deleted file mode 100644 index 0941f3c239..0000000000 --- a/botocore/data/mobile/2017-07-01/endpoint-rule-set-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - 
"fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://mobile.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of file diff --git a/botocore/data/mobile/2017-07-01/examples-1.json b/botocore/data/mobile/2017-07-01/examples-1.json deleted file mode 100644 index 0ea7e3b0bb..0000000000 --- a/botocore/data/mobile/2017-07-01/examples-1.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": "1.0", - "examples": { - } -} diff --git a/botocore/data/mobile/2017-07-01/service-2.json b/botocore/data/mobile/2017-07-01/service-2.json deleted file mode 100644 index b588798ebc..0000000000 --- a/botocore/data/mobile/2017-07-01/service-2.json +++ /dev/null @@ -1,732 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2017-07-01", - "endpointPrefix":"mobile", - "jsonVersion":"1.1", - "protocol":"rest-json", - "serviceFullName":"AWS Mobile", - "serviceId":"Mobile", - "signatureVersion":"v4", - "signingName":"AWSMobileHubService", - "uid":"mobile-2017-07-01" - }, - "operations":{ - "CreateProject":{ - "name":"CreateProject", - "http":{ - "method":"POST", - "requestUri":"/projects" - }, - "input":{"shape":"CreateProjectRequest"}, - "output":{"shape":"CreateProjectResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - 
{"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Creates an AWS Mobile Hub project.

" - }, - "DeleteProject":{ - "name":"DeleteProject", - "http":{ - "method":"DELETE", - "requestUri":"/projects/{projectId}" - }, - "input":{"shape":"DeleteProjectRequest"}, - "output":{"shape":"DeleteProjectResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Delets a project in AWS Mobile Hub.

" - }, - "DescribeBundle":{ - "name":"DescribeBundle", - "http":{ - "method":"GET", - "requestUri":"/bundles/{bundleId}" - }, - "input":{"shape":"DescribeBundleRequest"}, - "output":{"shape":"DescribeBundleResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Get the bundle details for the requested bundle id.

" - }, - "DescribeProject":{ - "name":"DescribeProject", - "http":{ - "method":"GET", - "requestUri":"/project" - }, - "input":{"shape":"DescribeProjectRequest"}, - "output":{"shape":"DescribeProjectResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Gets details about a project in AWS Mobile Hub.

" - }, - "ExportBundle":{ - "name":"ExportBundle", - "http":{ - "method":"POST", - "requestUri":"/bundles/{bundleId}" - }, - "input":{"shape":"ExportBundleRequest"}, - "output":{"shape":"ExportBundleResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Generates customized software development kit (SDK) and or tool packages used to integrate mobile web or mobile app clients with backend AWS resources.

" - }, - "ExportProject":{ - "name":"ExportProject", - "http":{ - "method":"POST", - "requestUri":"/exports/{projectId}" - }, - "input":{"shape":"ExportProjectRequest"}, - "output":{"shape":"ExportProjectResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Exports project configuration to a snapshot which can be downloaded and shared. Note that mobile app push credentials are encrypted in exported projects, so they can only be shared successfully within the same AWS account.

" - }, - "ListBundles":{ - "name":"ListBundles", - "http":{ - "method":"GET", - "requestUri":"/bundles" - }, - "input":{"shape":"ListBundlesRequest"}, - "output":{"shape":"ListBundlesResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"} - ], - "documentation":"

List all available bundles.

" - }, - "ListProjects":{ - "name":"ListProjects", - "http":{ - "method":"GET", - "requestUri":"/projects" - }, - "input":{"shape":"ListProjectsRequest"}, - "output":{"shape":"ListProjectsResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"} - ], - "documentation":"

Lists projects in AWS Mobile Hub.

" - }, - "UpdateProject":{ - "name":"UpdateProject", - "http":{ - "method":"POST", - "requestUri":"/update" - }, - "input":{"shape":"UpdateProjectRequest"}, - "output":{"shape":"UpdateProjectResult"}, - "errors":[ - {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"UnauthorizedException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"BadRequestException"}, - {"shape":"NotFoundException"}, - {"shape":"AccountActionRequiredException"}, - {"shape":"LimitExceededException"} - ], - "documentation":"

Update an existing project.

" - } - }, - "shapes":{ - "AccountActionRequiredException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Account Action is required in order to continue the request.

", - "error":{"httpStatusCode":403}, - "exception":true - }, - "AttributeKey":{ - "type":"string", - "documentation":"

Key part of key-value attribute pairs.

" - }, - "AttributeValue":{ - "type":"string", - "documentation":"

Value part of key-value attribute pairs.

" - }, - "Attributes":{ - "type":"map", - "key":{"shape":"AttributeKey"}, - "value":{"shape":"AttributeValue"}, - "documentation":"

Key-value attribute pairs.

" - }, - "BadRequestException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The request cannot be processed because some parameter is not valid or the project state prevents the operation from being performed.

", - "error":{"httpStatusCode":400}, - "exception":true - }, - "Boolean":{"type":"boolean"}, - "BundleDescription":{ - "type":"string", - "documentation":"

Description of the download bundle.

" - }, - "BundleDetails":{ - "type":"structure", - "members":{ - "bundleId":{"shape":"BundleId"}, - "title":{"shape":"BundleTitle"}, - "version":{"shape":"BundleVersion"}, - "description":{"shape":"BundleDescription"}, - "iconUrl":{"shape":"IconUrl"}, - "availablePlatforms":{"shape":"Platforms"} - }, - "documentation":"

The details of the bundle.

" - }, - "BundleId":{ - "type":"string", - "documentation":"

Unique bundle identifier.

" - }, - "BundleList":{ - "type":"list", - "member":{"shape":"BundleDetails"}, - "documentation":"

A list of bundles.

" - }, - "BundleTitle":{ - "type":"string", - "documentation":"

Title of the download bundle.

" - }, - "BundleVersion":{ - "type":"string", - "documentation":"

Version of the download bundle.

" - }, - "ConsoleUrl":{"type":"string"}, - "Contents":{ - "type":"blob", - "documentation":"

Binary file data.

" - }, - "CreateProjectRequest":{ - "type":"structure", - "members":{ - "name":{ - "shape":"ProjectName", - "documentation":"

Name of the project.

", - "location":"querystring", - "locationName":"name" - }, - "region":{ - "shape":"ProjectRegion", - "documentation":"

Default region where project resources should be created.

", - "location":"querystring", - "locationName":"region" - }, - "contents":{ - "shape":"Contents", - "documentation":"

ZIP or YAML file which contains configuration settings to be used when creating the project. This may be the contents of the file downloaded from the URL provided in an export project operation.

" - }, - "snapshotId":{ - "shape":"SnapshotId", - "documentation":"

Unique identifier for an exported snapshot of project configuration. This snapshot identifier is included in the share URL when a project is exported.

", - "location":"querystring", - "locationName":"snapshotId" - } - }, - "documentation":"

Request structure used to request a project be created.

", - "payload":"contents" - }, - "CreateProjectResult":{ - "type":"structure", - "members":{ - "details":{ - "shape":"ProjectDetails", - "documentation":"

Detailed information about the created AWS Mobile Hub project.

" - } - }, - "documentation":"

Result structure used in response to a request to create a project.

" - }, - "Date":{"type":"timestamp"}, - "DeleteProjectRequest":{ - "type":"structure", - "required":["projectId"], - "members":{ - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

", - "location":"uri", - "locationName":"projectId" - } - }, - "documentation":"

Request structure used to request a project be deleted.

" - }, - "DeleteProjectResult":{ - "type":"structure", - "members":{ - "deletedResources":{ - "shape":"Resources", - "documentation":"

Resources which were deleted.

" - }, - "orphanedResources":{ - "shape":"Resources", - "documentation":"

Resources which were not deleted, due to a risk of losing potentially important data or files.

" - } - }, - "documentation":"

Result structure used in response to request to delete a project.

" - }, - "DescribeBundleRequest":{ - "type":"structure", - "required":["bundleId"], - "members":{ - "bundleId":{ - "shape":"BundleId", - "documentation":"

Unique bundle identifier.

", - "location":"uri", - "locationName":"bundleId" - } - }, - "documentation":"

Request structure to request the details of a specific bundle.

" - }, - "DescribeBundleResult":{ - "type":"structure", - "members":{ - "details":{ - "shape":"BundleDetails", - "documentation":"

The details of the bundle.

" - } - }, - "documentation":"

Result structure contains the details of the bundle.

" - }, - "DescribeProjectRequest":{ - "type":"structure", - "required":["projectId"], - "members":{ - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

", - "location":"querystring", - "locationName":"projectId" - }, - "syncFromResources":{ - "shape":"Boolean", - "documentation":"

If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project.

", - "location":"querystring", - "locationName":"syncFromResources" - } - }, - "documentation":"

Request structure used to request details about a project.

" - }, - "DescribeProjectResult":{ - "type":"structure", - "members":{ - "details":{"shape":"ProjectDetails"} - }, - "documentation":"

Result structure used for requests of project details.

" - }, - "DownloadUrl":{ - "type":"string", - "documentation":"

The download Url.

" - }, - "ErrorMessage":{ - "type":"string", - "documentation":"

The Exception Error Message.

" - }, - "ExportBundleRequest":{ - "type":"structure", - "required":["bundleId"], - "members":{ - "bundleId":{ - "shape":"BundleId", - "documentation":"

Unique bundle identifier.

", - "location":"uri", - "locationName":"bundleId" - }, - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

", - "location":"querystring", - "locationName":"projectId" - }, - "platform":{ - "shape":"Platform", - "documentation":"

Developer desktop or target application platform.

", - "location":"querystring", - "locationName":"platform" - } - }, - "documentation":"

Request structure used to request generation of custom SDK and tool packages required to integrate mobile web or app clients with backed AWS resources.

" - }, - "ExportBundleResult":{ - "type":"structure", - "members":{ - "downloadUrl":{ - "shape":"DownloadUrl", - "documentation":"

URL which contains the custom-generated SDK and tool packages used to integrate the client mobile app or web app with the AWS resources created by the AWS Mobile Hub project.

" - } - }, - "documentation":"

Result structure which contains link to download custom-generated SDK and tool packages used to integrate mobile web or app clients with backed AWS resources.

" - }, - "ExportProjectRequest":{ - "type":"structure", - "required":["projectId"], - "members":{ - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

", - "location":"uri", - "locationName":"projectId" - } - }, - "documentation":"

Request structure used in requests to export project configuration details.

" - }, - "ExportProjectResult":{ - "type":"structure", - "members":{ - "downloadUrl":{ - "shape":"DownloadUrl", - "documentation":"

URL which can be used to download the exported project configuration file(s).

" - }, - "shareUrl":{ - "shape":"ShareUrl", - "documentation":"

URL which can be shared to allow other AWS users to create their own project in AWS Mobile Hub with the same configuration as the specified project. This URL pertains to a snapshot in time of the project configuration that is created when this API is called. If you want to share additional changes to your project configuration, then you will need to create and share a new snapshot by calling this method again.

" - }, - "snapshotId":{ - "shape":"SnapshotId", - "documentation":"

Unique identifier for the exported snapshot of the project configuration. This snapshot identifier is included in the share URL.

" - } - }, - "documentation":"

Result structure used for requests to export project configuration details.

" - }, - "Feature":{ - "type":"string", - "documentation":"

Identifies which feature in AWS Mobile Hub is associated with this AWS resource.

" - }, - "IconUrl":{ - "type":"string", - "documentation":"

Icon for the download bundle.

" - }, - "InternalFailureException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The service has encountered an unexpected error condition which prevents it from servicing the request.

", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "LimitExceededException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"ErrorMessage", - "location":"header", - "locationName":"Retry-After" - }, - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

There are too many AWS Mobile Hub projects in the account or the account has exceeded the maximum number of resources in some AWS service. You should create another sub-account using AWS Organizations or remove some resources and retry your request.

", - "error":{"httpStatusCode":429}, - "exception":true - }, - "ListBundlesRequest":{ - "type":"structure", - "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", - "location":"querystring", - "locationName":"maxResults" - }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing bundles from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more bundles.

", - "location":"querystring", - "locationName":"nextToken" - } - }, - "documentation":"

Request structure to request all available bundles.

" - }, - "ListBundlesResult":{ - "type":"structure", - "members":{ - "bundleList":{ - "shape":"BundleList", - "documentation":"

A list of bundles.

" - }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" - } - }, - "documentation":"

Result structure contains a list of all available bundles with details.

" - }, - "ListProjectsRequest":{ - "type":"structure", - "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", - "location":"querystring", - "locationName":"maxResults" - }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing projects from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more projects.

", - "location":"querystring", - "locationName":"nextToken" - } - }, - "documentation":"

Request structure used to request projects list in AWS Mobile Hub.

" - }, - "ListProjectsResult":{ - "type":"structure", - "members":{ - "projects":{"shape":"ProjectSummaries"}, - "nextToken":{"shape":"NextToken"} - }, - "documentation":"

Result structure used for requests to list projects in AWS Mobile Hub.

" - }, - "MaxResults":{ - "type":"integer", - "documentation":"

Maximum number of records to list in a single response.

" - }, - "NextToken":{ - "type":"string", - "documentation":"

Pagination token. Set to null to start listing records from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more entries.

" - }, - "NotFoundException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

No entity can be found with the specified identifier.

", - "error":{"httpStatusCode":404}, - "exception":true - }, - "Platform":{ - "type":"string", - "documentation":"

Developer desktop or target mobile app or website platform.

", - "enum":[ - "OSX", - "WINDOWS", - "LINUX", - "OBJC", - "SWIFT", - "ANDROID", - "JAVASCRIPT" - ] - }, - "Platforms":{ - "type":"list", - "member":{"shape":"Platform"}, - "documentation":"

Developer desktop or mobile app or website platforms.

" - }, - "ProjectDetails":{ - "type":"structure", - "members":{ - "name":{"shape":"ProjectName"}, - "projectId":{"shape":"ProjectId"}, - "region":{"shape":"ProjectRegion"}, - "state":{"shape":"ProjectState"}, - "createdDate":{ - "shape":"Date", - "documentation":"

Date the project was created.

" - }, - "lastUpdatedDate":{ - "shape":"Date", - "documentation":"

Date of the last modification of the project.

" - }, - "consoleUrl":{ - "shape":"ConsoleUrl", - "documentation":"

Website URL for this project in the AWS Mobile Hub console.

" - }, - "resources":{"shape":"Resources"} - }, - "documentation":"

Detailed information about an AWS Mobile Hub project.

" - }, - "ProjectId":{ - "type":"string", - "documentation":"

Unique project identifier.

" - }, - "ProjectName":{ - "type":"string", - "documentation":"

Name of the project.

" - }, - "ProjectRegion":{ - "type":"string", - "documentation":"

Default region to use for AWS resource creation in the AWS Mobile Hub project.

" - }, - "ProjectState":{ - "type":"string", - "documentation":"

Synchronization state for a project.

", - "enum":[ - "NORMAL", - "SYNCING", - "IMPORTING" - ] - }, - "ProjectSummaries":{ - "type":"list", - "member":{"shape":"ProjectSummary"}, - "documentation":"

List of projects.

" - }, - "ProjectSummary":{ - "type":"structure", - "members":{ - "name":{ - "shape":"ProjectName", - "documentation":"

Name of the project.

" - }, - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

" - } - }, - "documentation":"

Summary information about an AWS Mobile Hub project.

" - }, - "Resource":{ - "type":"structure", - "members":{ - "type":{"shape":"ResourceType"}, - "name":{"shape":"ResourceName"}, - "arn":{"shape":"ResourceArn"}, - "feature":{"shape":"Feature"}, - "attributes":{"shape":"Attributes"} - }, - "documentation":"

Information about an instance of an AWS resource associated with a project.

" - }, - "ResourceArn":{ - "type":"string", - "documentation":"

AWS resource name which uniquely identifies the resource in AWS systems.

" - }, - "ResourceName":{ - "type":"string", - "documentation":"

Name of the AWS resource (e.g., for an Amazon S3 bucket this is the name of the bucket).

" - }, - "ResourceType":{ - "type":"string", - "documentation":"

Simplified name for type of AWS resource (e.g., bucket is an Amazon S3 bucket).

" - }, - "Resources":{ - "type":"list", - "member":{"shape":"Resource"}, - "documentation":"

List of AWS resources associated with a project.

" - }, - "ServiceUnavailableException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"ErrorMessage", - "location":"header", - "locationName":"Retry-After" - }, - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

The service is temporarily unavailable. The request should be retried after some time delay.

", - "error":{"httpStatusCode":503}, - "exception":true, - "fault":true - }, - "ShareUrl":{ - "type":"string", - "documentation":"

URL which can be shared to allow other AWS users to create their own project in AWS Mobile Hub with the same configuration as the specified project. This URL pertains to a snapshot in time of the project configuration that is created when this API is called. If you want to share additional changes to your project configuration, then you will need to create and share a new snapshot by calling this method again.

" - }, - "SnapshotId":{ - "type":"string", - "documentation":"

Unique identifier for the exported snapshot of the project configuration. This snapshot identifier is included in the share URL.

" - }, - "TooManyRequestsException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"ErrorMessage", - "location":"header", - "locationName":"Retry-After" - }, - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Too many requests have been received for this AWS account in too short a time. The request should be retried after some time delay.

", - "error":{"httpStatusCode":429}, - "exception":true - }, - "UnauthorizedException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

Credentials of the caller are insufficient to authorize the request.

", - "error":{"httpStatusCode":401}, - "exception":true - }, - "UpdateProjectRequest":{ - "type":"structure", - "required":["projectId"], - "members":{ - "contents":{ - "shape":"Contents", - "documentation":"

ZIP or YAML file which contains project configuration to be updated. This should be the contents of the file downloaded from the URL provided in an export project operation.

" - }, - "projectId":{ - "shape":"ProjectId", - "documentation":"

Unique project identifier.

", - "location":"querystring", - "locationName":"projectId" - } - }, - "documentation":"

Request structure used for requests to update project configuration.

", - "payload":"contents" - }, - "UpdateProjectResult":{ - "type":"structure", - "members":{ - "details":{ - "shape":"ProjectDetails", - "documentation":"

Detailed information about the updated AWS Mobile Hub project.

" - } - }, - "documentation":"

Result structure used for requests to update project configuration.

" - } - }, - "documentation":"

AWS Mobile Service provides mobile app and website developers with capabilities required to configure AWS resources and bootstrap their developer desktop projects with the necessary SDKs, constants, tools and samples to make use of those resources.

" -} diff --git a/botocore/data/mq/2017-11-27/endpoint-rule-set-1.json b/botocore/data/mq/2017-11-27/endpoint-rule-set-1.json index a5581b0094..d56e61d069 100644 --- a/botocore/data/mq/2017-11-27/endpoint-rule-set-1.json +++ b/botocore/data/mq/2017-11-27/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, 
{ "conditions": [], diff --git a/botocore/data/mq/2017-11-27/service-2.json b/botocore/data/mq/2017-11-27/service-2.json index f10c53475e..32f807b0aa 100644 --- a/botocore/data/mq/2017-11-27/service-2.json +++ b/botocore/data/mq/2017-11-27/service-2.json @@ -1,3707 +1,4030 @@ { - "metadata" : { - "apiVersion" : "2017-11-27", - "endpointPrefix" : "mq", - "signingName" : "mq", - "serviceFullName" : "AmazonMQ", - "serviceId" : "mq", - "protocol" : "rest-json", - "jsonVersion" : "1.1", - "uid" : "mq-2017-11-27", - "signatureVersion" : "v4" + "metadata": { + "apiVersion": "2017-11-27", + "endpointPrefix": "mq", + "signingName": "mq", + "serviceFullName": "AmazonMQ", + "serviceId": "mq", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "mq-2017-11-27", + "signatureVersion": "v4", + "auth": [ + "aws.auth#sigv4" + ] }, - "operations" : { - "CreateBroker" : { - "name" : "CreateBroker", - "http" : { - "method" : "POST", - "requestUri" : "/v1/brokers", - "responseCode" : 200 - }, - "input" : { - "shape" : "CreateBrokerRequest" - }, - "output" : { - "shape" : "CreateBrokerResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "UnauthorizedException", - "documentation" : "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Creates a broker. Note: This API is asynchronous.

To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM policy.

  • ec2:CreateNetworkInterface

    This permission is required to allow Amazon MQ to create an elastic network interface (ENI) on behalf of your account.

  • ec2:CreateNetworkInterfacePermission

    This permission is required to attach the ENI to the broker instance.

  • ec2:DeleteNetworkInterface

  • ec2:DeleteNetworkInterfacePermission

  • ec2:DetachNetworkInterface

  • ec2:DescribeInternetGateways

  • ec2:DescribeNetworkInterfaces

  • ec2:DescribeNetworkInterfacePermissions

  • ec2:DescribeRouteTables

  • ec2:DescribeSecurityGroups

  • ec2:DescribeSubnets

  • ec2:DescribeVpcs

For more information, see Create an IAM User and Get Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in the Amazon MQ Developer Guide.

" - }, - "CreateConfiguration" : { - "name" : "CreateConfiguration", - "http" : { - "method" : "POST", - "requestUri" : "/v1/configurations", - "responseCode" : 200 - }, - "input" : { - "shape" : "CreateConfigurationRequest" - }, - "output" : { - "shape" : "CreateConfigurationResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

" - }, - "CreateTags" : { - "name" : "CreateTags", - "http" : { - "method" : "POST", - "requestUri" : "/v1/tags/{resource-arn}", - "responseCode" : 204 - }, - "input" : { - "shape" : "CreateTagsRequest" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Add a tag to a resource.

" - }, - "CreateUser" : { - "name" : "CreateUser", - "http" : { - "method" : "POST", - "requestUri" : "/v1/brokers/{broker-id}/users/{username}", - "responseCode" : 200 - }, - "input" : { - "shape" : "CreateUserRequest" - }, - "output" : { - "shape" : "CreateUserResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Creates an ActiveMQ user.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

" - }, - "DeleteBroker" : { - "name" : "DeleteBroker", - "http" : { - "method" : "DELETE", - "requestUri" : "/v1/brokers/{broker-id}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DeleteBrokerRequest" - }, - "output" : { - "shape" : "DeleteBrokerResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Deletes a broker. Note: This API is asynchronous.

" - }, - "DeleteTags" : { - "name" : "DeleteTags", - "http" : { - "method" : "DELETE", - "requestUri" : "/v1/tags/{resource-arn}", - "responseCode" : 204 - }, - "input" : { - "shape" : "DeleteTagsRequest" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Removes a tag from a resource.

" - }, - "DeleteUser" : { - "name" : "DeleteUser", - "http" : { - "method" : "DELETE", - "requestUri" : "/v1/brokers/{broker-id}/users/{username}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DeleteUserRequest" - }, - "output" : { - "shape" : "DeleteUserResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Deletes an ActiveMQ user.

" - }, - "DescribeBroker" : { - "name" : "DescribeBroker", - "http" : { - "method" : "GET", - "requestUri" : "/v1/brokers/{broker-id}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeBrokerRequest" - }, - "output" : { - "shape" : "DescribeBrokerResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns information about the specified broker.

" - }, - "DescribeBrokerEngineTypes" : { - "name" : "DescribeBrokerEngineTypes", - "http" : { - "method" : "GET", - "requestUri" : "/v1/broker-engine-types", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeBrokerEngineTypesRequest" - }, - "output" : { - "shape" : "DescribeBrokerEngineTypesResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Describe available engine types and versions.

" - }, - "DescribeBrokerInstanceOptions" : { - "name" : "DescribeBrokerInstanceOptions", - "http" : { - "method" : "GET", - "requestUri" : "/v1/broker-instance-options", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeBrokerInstanceOptionsRequest" - }, - "output" : { - "shape" : "DescribeBrokerInstanceOptionsResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Describe available broker instance options.

" - }, - "DescribeConfiguration" : { - "name" : "DescribeConfiguration", - "http" : { - "method" : "GET", - "requestUri" : "/v1/configurations/{configuration-id}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeConfigurationRequest" - }, - "output" : { - "shape" : "DescribeConfigurationResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns information about the specified configuration.

" - }, - "DescribeConfigurationRevision" : { - "name" : "DescribeConfigurationRevision", - "http" : { - "method" : "GET", - "requestUri" : "/v1/configurations/{configuration-id}/revisions/{configuration-revision}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeConfigurationRevisionRequest" - }, - "output" : { - "shape" : "DescribeConfigurationRevisionResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns the specified configuration revision for the specified configuration.

" - }, - "DescribeUser" : { - "name" : "DescribeUser", - "http" : { - "method" : "GET", - "requestUri" : "/v1/brokers/{broker-id}/users/{username}", - "responseCode" : 200 - }, - "input" : { - "shape" : "DescribeUserRequest" - }, - "output" : { - "shape" : "DescribeUserResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns information about an ActiveMQ user.

" - }, - "ListBrokers" : { - "name" : "ListBrokers", - "http" : { - "method" : "GET", - "requestUri" : "/v1/brokers", - "responseCode" : 200 - }, - "input" : { - "shape" : "ListBrokersRequest" - }, - "output" : { - "shape" : "ListBrokersResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns a list of all brokers.

" - }, - "ListConfigurationRevisions" : { - "name" : "ListConfigurationRevisions", - "http" : { - "method" : "GET", - "requestUri" : "/v1/configurations/{configuration-id}/revisions", - "responseCode" : 200 - }, - "input" : { - "shape" : "ListConfigurationRevisionsRequest" - }, - "output" : { - "shape" : "ListConfigurationRevisionsResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns a list of all revisions for the specified configuration.

" - }, - "ListConfigurations" : { - "name" : "ListConfigurations", - "http" : { - "method" : "GET", - "requestUri" : "/v1/configurations", - "responseCode" : 200 - }, - "input" : { - "shape" : "ListConfigurationsRequest" - }, - "output" : { - "shape" : "ListConfigurationsResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns a list of all configurations.

" - }, - "ListTags" : { - "name" : "ListTags", - "http" : { - "method" : "GET", - "requestUri" : "/v1/tags/{resource-arn}", - "responseCode" : 200 - }, - "input" : { - "shape" : "ListTagsRequest" - }, - "output" : { - "shape" : "ListTagsResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Lists tags for a resource.

" - }, - "ListUsers" : { - "name" : "ListUsers", - "http" : { - "method" : "GET", - "requestUri" : "/v1/brokers/{broker-id}/users", - "responseCode" : 200 - }, - "input" : { - "shape" : "ListUsersRequest" - }, - "output" : { - "shape" : "ListUsersResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Returns a list of all ActiveMQ users.

" - }, - "Promote" : { - "name" : "Promote", - "http" : { - "method" : "POST", - "requestUri" : "/v1/brokers/{broker-id}/promote", - "responseCode" : 200 - }, - "input" : { - "shape" : "PromoteRequest" - }, - "output" : { - "shape" : "PromoteResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Promotes a data replication replica broker to the primary broker role.

" - }, - "RebootBroker" : { - "name" : "RebootBroker", - "http" : { - "method" : "POST", - "requestUri" : "/v1/brokers/{broker-id}/reboot", - "responseCode" : 200 - }, - "input" : { - "shape" : "RebootBrokerRequest" - }, - "output" : { - "shape" : "RebootBrokerResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Reboots a broker. Note: This API is asynchronous.

" - }, - "UpdateBroker" : { - "name" : "UpdateBroker", - "http" : { - "method" : "PUT", - "requestUri" : "/v1/brokers/{broker-id}", - "responseCode" : 200 - }, - "input" : { - "shape" : "UpdateBrokerRequest" - }, - "output" : { - "shape" : "UpdateBrokerResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Adds a pending configuration change to a broker.

" - }, - "UpdateConfiguration" : { - "name" : "UpdateConfiguration", - "http" : { - "method" : "PUT", - "requestUri" : "/v1/configurations/{configuration-id}", - "responseCode" : 200 - }, - "input" : { - "shape" : "UpdateConfigurationRequest" - }, - "output" : { - "shape" : "UpdateConfigurationResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Updates the specified configuration.

" - }, - "UpdateUser" : { - "name" : "UpdateUser", - "http" : { - "method" : "PUT", - "requestUri" : "/v1/brokers/{broker-id}/users/{username}", - "responseCode" : 200 - }, - "input" : { - "shape" : "UpdateUserRequest" - }, - "output" : { - "shape" : "UpdateUserResponse", - "documentation" : "

HTTP Status Code 200: OK.

" - }, - "errors" : [ { - "shape" : "NotFoundException", - "documentation" : "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "BadRequestException", - "documentation" : "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" - }, { - "shape" : "InternalServerErrorException", - "documentation" : "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" - }, { - "shape" : "ConflictException", - "documentation" : "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" - }, { - "shape" : "ForbiddenException", - "documentation" : "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" - } ], - "documentation" : "

Updates the information for an ActiveMQ user.

" + "operations": { + "CreateBroker": { + "name": "CreateBroker", + "http": { + "method": "POST", + "requestUri": "/v1/brokers", + "responseCode": 200 + }, + "input": { + "shape": "CreateBrokerRequest" + }, + "output": { + "shape": "CreateBrokerResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Creates a broker. Note: This API is asynchronous.

To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM policy.

  • ec2:CreateNetworkInterface

    This permission is required to allow Amazon MQ to create an elastic network interface (ENI) on behalf of your account.

  • ec2:CreateNetworkInterfacePermission

    This permission is required to attach the ENI to the broker instance.

  • ec2:DeleteNetworkInterface

  • ec2:DeleteNetworkInterfacePermission

  • ec2:DetachNetworkInterface

  • ec2:DescribeInternetGateways

  • ec2:DescribeNetworkInterfaces

  • ec2:DescribeNetworkInterfacePermissions

  • ec2:DescribeRouteTables

  • ec2:DescribeSecurityGroups

  • ec2:DescribeSubnets

  • ec2:DescribeVpcs

For more information, see Create an IAM User and Get Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in the Amazon MQ Developer Guide.

" + }, + "CreateConfiguration": { + "name": "CreateConfiguration", + "http": { + "method": "POST", + "requestUri": "/v1/configurations", + "responseCode": 200 + }, + "input": { + "shape": "CreateConfigurationRequest" + }, + "output": { + "shape": "CreateConfigurationResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

" + }, + "CreateTags": { + "name": "CreateTags", + "http": { + "method": "POST", + "requestUri": "/v1/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "CreateTagsRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Add a tag to a resource.

" + }, + "CreateUser": { + "name": "CreateUser", + "http": { + "method": "POST", + "requestUri": "/v1/brokers/{broker-id}/users/{username}", + "responseCode": 200 + }, + "input": { + "shape": "CreateUserRequest" + }, + "output": { + "shape": "CreateUserResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Creates an ActiveMQ user.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

" + }, + "DeleteBroker": { + "name": "DeleteBroker", + "http": { + "method": "DELETE", + "requestUri": "/v1/brokers/{broker-id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBrokerRequest" + }, + "output": { + "shape": "DeleteBrokerResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Deletes a broker. Note: This API is asynchronous.

" + }, + "DeleteTags": { + "name": "DeleteTags", + "http": { + "method": "DELETE", + "requestUri": "/v1/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteTagsRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Removes a tag from a resource.

" + }, + "DeleteUser": { + "name": "DeleteUser", + "http": { + "method": "DELETE", + "requestUri": "/v1/brokers/{broker-id}/users/{username}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteUserRequest" + }, + "output": { + "shape": "DeleteUserResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Deletes an ActiveMQ user.

" + }, + "DescribeBroker": { + "name": "DescribeBroker", + "http": { + "method": "GET", + "requestUri": "/v1/brokers/{broker-id}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeBrokerRequest" + }, + "output": { + "shape": "DescribeBrokerResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns information about the specified broker.

" + }, + "DescribeBrokerEngineTypes": { + "name": "DescribeBrokerEngineTypes", + "http": { + "method": "GET", + "requestUri": "/v1/broker-engine-types", + "responseCode": 200 + }, + "input": { + "shape": "DescribeBrokerEngineTypesRequest" + }, + "output": { + "shape": "DescribeBrokerEngineTypesResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Describe available engine types and versions.

" + }, + "DescribeBrokerInstanceOptions": { + "name": "DescribeBrokerInstanceOptions", + "http": { + "method": "GET", + "requestUri": "/v1/broker-instance-options", + "responseCode": 200 + }, + "input": { + "shape": "DescribeBrokerInstanceOptionsRequest" + }, + "output": { + "shape": "DescribeBrokerInstanceOptionsResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Describe available broker instance options.

" + }, + "DescribeConfiguration": { + "name": "DescribeConfiguration", + "http": { + "method": "GET", + "requestUri": "/v1/configurations/{configuration-id}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeConfigurationRequest" + }, + "output": { + "shape": "DescribeConfigurationResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns information about the specified configuration.

" + }, + "DescribeConfigurationRevision": { + "name": "DescribeConfigurationRevision", + "http": { + "method": "GET", + "requestUri": "/v1/configurations/{configuration-id}/revisions/{configuration-revision}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeConfigurationRevisionRequest" + }, + "output": { + "shape": "DescribeConfigurationRevisionResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns the specified configuration revision for the specified configuration.

" + }, + "DescribeUser": { + "name": "DescribeUser", + "http": { + "method": "GET", + "requestUri": "/v1/brokers/{broker-id}/users/{username}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeUserRequest" + }, + "output": { + "shape": "DescribeUserResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns information about an ActiveMQ user.

" + }, + "ListBrokers": { + "name": "ListBrokers", + "http": { + "method": "GET", + "requestUri": "/v1/brokers", + "responseCode": 200 + }, + "input": { + "shape": "ListBrokersRequest" + }, + "output": { + "shape": "ListBrokersResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns a list of all brokers.

" + }, + "ListConfigurationRevisions": { + "name": "ListConfigurationRevisions", + "http": { + "method": "GET", + "requestUri": "/v1/configurations/{configuration-id}/revisions", + "responseCode": 200 + }, + "input": { + "shape": "ListConfigurationRevisionsRequest" + }, + "output": { + "shape": "ListConfigurationRevisionsResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns a list of all revisions for the specified configuration.

" + }, + "ListConfigurations": { + "name": "ListConfigurations", + "http": { + "method": "GET", + "requestUri": "/v1/configurations", + "responseCode": 200 + }, + "input": { + "shape": "ListConfigurationsRequest" + }, + "output": { + "shape": "ListConfigurationsResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns a list of all configurations.

" + }, + "ListTags": { + "name": "ListTags", + "http": { + "method": "GET", + "requestUri": "/v1/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsRequest" + }, + "output": { + "shape": "ListTagsResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Lists tags for a resource.

" + }, + "ListUsers": { + "name": "ListUsers", + "http": { + "method": "GET", + "requestUri": "/v1/brokers/{broker-id}/users", + "responseCode": 200 + }, + "input": { + "shape": "ListUsersRequest" + }, + "output": { + "shape": "ListUsersResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Returns a list of all ActiveMQ users.

" + }, + "Promote": { + "name": "Promote", + "http": { + "method": "POST", + "requestUri": "/v1/brokers/{broker-id}/promote", + "responseCode": 200 + }, + "input": { + "shape": "PromoteRequest" + }, + "output": { + "shape": "PromoteResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Promotes a data replication replica broker to the primary broker role.

" + }, + "RebootBroker": { + "name": "RebootBroker", + "http": { + "method": "POST", + "requestUri": "/v1/brokers/{broker-id}/reboot", + "responseCode": 200 + }, + "input": { + "shape": "RebootBrokerRequest" + }, + "output": { + "shape": "RebootBrokerResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Reboots a broker. Note: This API is asynchronous.

" + }, + "UpdateBroker": { + "name": "UpdateBroker", + "http": { + "method": "PUT", + "requestUri": "/v1/brokers/{broker-id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBrokerRequest" + }, + "output": { + "shape": "UpdateBrokerResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Adds a pending configuration change to a broker.

" + }, + "UpdateConfiguration": { + "name": "UpdateConfiguration", + "http": { + "method": "PUT", + "requestUri": "/v1/configurations/{configuration-id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateConfigurationRequest" + }, + "output": { + "shape": "UpdateConfigurationResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Updates the specified configuration.

" + }, + "UpdateUser": { + "name": "UpdateUser", + "http": { + "method": "PUT", + "requestUri": "/v1/brokers/{broker-id}/users/{username}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateUserRequest" + }, + "output": { + "shape": "UpdateUserResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + } + ], + "documentation": "

Updates the information for an ActiveMQ user.

" } }, - "shapes" : { - "ActionRequired" : { - "type" : "structure", - "members" : { - "ActionRequiredCode" : { - "shape" : "__string", - "locationName" : "actionRequiredCode", - "documentation" : "

The code you can use to find instructions on the action required to resolve your broker issue.

" - }, - "ActionRequiredInfo" : { - "shape" : "__string", - "locationName" : "actionRequiredInfo", - "documentation" : "

Information about the action required to resolve your broker issue.

" - } - }, - "documentation" : "

Action required for a broker.

" - }, - "AuthenticationStrategy" : { - "type" : "string", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

", - "enum" : [ "SIMPLE", "LDAP" ] - }, - "AvailabilityZone" : { - "type" : "structure", - "members" : { - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Id for the availability zone.

" - } - }, - "documentation" : "

Name of the availability zone.

" - }, - "BadRequestException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 400 + "shapes": { + "ActionRequired": { + "type": "structure", + "members": { + "ActionRequiredCode": { + "shape": "__string", + "locationName": "actionRequiredCode", + "documentation": "

The code you can use to find instructions on the action required to resolve your broker issue.

" + }, + "ActionRequiredInfo": { + "shape": "__string", + "locationName": "actionRequiredInfo", + "documentation": "

Information about the action required to resolve your broker issue.

" + } + }, + "documentation": "

Action required for a broker.

" + }, + "AuthenticationStrategy": { + "type": "string", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

", + "enum": [ + "SIMPLE", + "LDAP" + ] + }, + "AvailabilityZone": { + "type": "structure", + "members": { + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Id for the availability zone.

" + } + }, + "documentation": "

Name of the availability zone.

" + }, + "BadRequestException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 400 } }, - "BrokerEngineType" : { - "type" : "structure", - "members" : { - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

The broker's engine type.

" - }, - "EngineVersions" : { - "shape" : "__listOfEngineVersion", - "locationName" : "engineVersions", - "documentation" : "

The list of engine versions.

" - } - }, - "documentation" : "

Types of broker engines.

" - }, - "BrokerEngineTypeOutput" : { - "type" : "structure", - "members" : { - "BrokerEngineTypes" : { - "shape" : "__listOfBrokerEngineType", - "locationName" : "brokerEngineTypes", - "documentation" : "

List of available engine types and versions.

" - }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of engine types that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "documentation" : "

Returns a list of broker engine type.

", - "required" : [ "MaxResults" ] - }, - "BrokerInstance" : { - "type" : "structure", - "members" : { - "ConsoleURL" : { - "shape" : "__string", - "locationName" : "consoleURL", - "documentation" : "

The brokers web console URL.

" - }, - "Endpoints" : { - "shape" : "__listOf__string", - "locationName" : "endpoints", - "documentation" : "

The broker's wire-level protocol endpoints.

" - }, - "IpAddress" : { - "shape" : "__string", - "locationName" : "ipAddress", - "documentation" : "

The IP address of the Elastic Network Interface (ENI) attached to the broker. Does not apply to RabbitMQ brokers.

" - } - }, - "documentation" : "

Returns information about all brokers.

" - }, - "BrokerInstanceOption" : { - "type" : "structure", - "members" : { - "AvailabilityZones" : { - "shape" : "__listOfAvailabilityZone", - "locationName" : "availabilityZones", - "documentation" : "

The list of available az.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

The broker's engine type.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's instance type.

" - }, - "StorageType" : { - "shape" : "BrokerStorageType", - "locationName" : "storageType", - "documentation" : "

The broker's storage type.

" - }, - "SupportedDeploymentModes" : { - "shape" : "__listOfDeploymentMode", - "locationName" : "supportedDeploymentModes", - "documentation" : "

The list of supported deployment modes.

" - }, - "SupportedEngineVersions" : { - "shape" : "__listOf__string", - "locationName" : "supportedEngineVersions", - "documentation" : "

The list of supported engine versions.

" - } - }, - "documentation" : "

Option for host instance type.

" - }, - "BrokerInstanceOptionsOutput" : { - "type" : "structure", - "members" : { - "BrokerInstanceOptions" : { - "shape" : "__listOfBrokerInstanceOption", - "locationName" : "brokerInstanceOptions", - "documentation" : "

List of available broker instance options.

" - }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of instance options that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "documentation" : "

Returns a list of broker instance options.

", - "required" : [ "MaxResults" ] - }, - "BrokerState" : { - "type" : "string", - "documentation" : "

The broker's status.

", - "enum" : [ "CREATION_IN_PROGRESS", "CREATION_FAILED", "DELETION_IN_PROGRESS", "RUNNING", "REBOOT_IN_PROGRESS", "CRITICAL_ACTION_REQUIRED", "REPLICA" ] - }, - "BrokerStorageType" : { - "type" : "string", - "documentation" : "

The broker's storage type.

EFS is not supported for RabbitMQ engine type.

", - "enum" : [ "EBS", "EFS" ] - }, - "BrokerSummary" : { - "type" : "structure", - "members" : { - "BrokerArn" : { - "shape" : "__string", - "locationName" : "brokerArn", - "documentation" : "

The broker's Amazon Resource Name (ARN).

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "BrokerName" : { - "shape" : "__string", - "locationName" : "brokerName", - "documentation" : "

The broker's name. This value is unique in your Amazon Web Services account, 1-50 characters long, and containing only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" - }, - "BrokerState" : { - "shape" : "BrokerState", - "locationName" : "brokerState", - "documentation" : "

The broker's status.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

The time when the broker was created.

" - }, - "DeploymentMode" : { - "shape" : "DeploymentMode", - "locationName" : "deploymentMode", - "documentation" : "

The broker's deployment mode.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

The type of broker engine.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's instance type.

" - } - }, - "documentation" : "

Returns information about all brokers.

", - "required" : [ "DeploymentMode", "EngineType" ] - }, - "ChangeType" : { - "type" : "string", - "documentation" : "

The type of change pending for the ActiveMQ user.

", - "enum" : [ "CREATE", "UPDATE", "DELETE" ] - }, - "Configuration" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

Required. The ARN of the configuration.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration revision.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

Required. The description of the configuration.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see, Supported engines.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

Required. The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The list of all tags associated with this configuration.

" - } - }, - "documentation" : "

Returns information about all configurations.

", - "required" : [ "Description", "EngineVersion", "LatestRevision", "AuthenticationStrategy", "EngineType", "Id", "Arn", "Name", "Created" ] - }, - "ConfigurationId" : { - "type" : "structure", - "members" : { - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "Revision" : { - "shape" : "__integer", - "locationName" : "revision", - "documentation" : "

The revision number of the configuration.

" - } - }, - "documentation" : "

A list of information about the configuration.

", - "required" : [ "Id" ] - }, - "ConfigurationRevision" : { - "type" : "structure", - "members" : { - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration revision.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

The description of the configuration revision.

" - }, - "Revision" : { - "shape" : "__integer", - "locationName" : "revision", - "documentation" : "

Required. The revision number of the configuration.

" - } - }, - "documentation" : "

Returns information about the specified configuration revision.

", - "required" : [ "Revision", "Created" ] - }, - "Configurations" : { - "type" : "structure", - "members" : { - "Current" : { - "shape" : "ConfigurationId", - "locationName" : "current", - "documentation" : "

The broker's current configuration.

" - }, - "History" : { - "shape" : "__listOfConfigurationId", - "locationName" : "history", - "documentation" : "

The history of configurations applied to the broker.

" - }, - "Pending" : { - "shape" : "ConfigurationId", - "locationName" : "pending", - "documentation" : "

The broker's pending configuration.

" - } - }, - "documentation" : "

Broker configuration information

" - }, - "ConflictException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 409 + "BrokerEngineType": { + "type": "structure", + "members": { + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

The broker's engine type.

" + }, + "EngineVersions": { + "shape": "__listOfEngineVersion", + "locationName": "engineVersions", + "documentation": "

The list of engine versions.

" + } + }, + "documentation": "

Types of broker engines.

" + }, + "BrokerEngineTypeOutput": { + "type": "structure", + "members": { + "BrokerEngineTypes": { + "shape": "__listOfBrokerEngineType", + "locationName": "brokerEngineTypes", + "documentation": "

List of available engine types and versions.

" + }, + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of engine types that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "documentation": "

Returns a list of broker engine types.

", + "required": [ + "MaxResults" + ] + }, + "BrokerInstance": { + "type": "structure", + "members": { + "ConsoleURL": { + "shape": "__string", + "locationName": "consoleURL", + "documentation": "

The broker's web console URL.

" + }, + "Endpoints": { + "shape": "__listOf__string", + "locationName": "endpoints", + "documentation": "

The broker's wire-level protocol endpoints.

" + }, + "IpAddress": { + "shape": "__string", + "locationName": "ipAddress", + "documentation": "

The IP address of the Elastic Network Interface (ENI) attached to the broker. Does not apply to RabbitMQ brokers.

" + } + }, + "documentation": "

Returns information about all brokers.

" + }, + "BrokerInstanceOption": { + "type": "structure", + "members": { + "AvailabilityZones": { + "shape": "__listOfAvailabilityZone", + "locationName": "availabilityZones", + "documentation": "

The list of available Availability Zones.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

The broker's engine type.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's instance type.

" + }, + "StorageType": { + "shape": "BrokerStorageType", + "locationName": "storageType", + "documentation": "

The broker's storage type.

" + }, + "SupportedDeploymentModes": { + "shape": "__listOfDeploymentMode", + "locationName": "supportedDeploymentModes", + "documentation": "

The list of supported deployment modes.

" + }, + "SupportedEngineVersions": { + "shape": "__listOf__string", + "locationName": "supportedEngineVersions", + "documentation": "

The list of supported engine versions.

" + } + }, + "documentation": "

Option for host instance type.

" + }, + "BrokerInstanceOptionsOutput": { + "type": "structure", + "members": { + "BrokerInstanceOptions": { + "shape": "__listOfBrokerInstanceOption", + "locationName": "brokerInstanceOptions", + "documentation": "

List of available broker instance options.

" + }, + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of instance options that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "documentation": "

Returns a list of broker instance options.

", + "required": [ + "MaxResults" + ] + }, + "BrokerState": { + "type": "string", + "documentation": "

The broker's status.

", + "enum": [ + "CREATION_IN_PROGRESS", + "CREATION_FAILED", + "DELETION_IN_PROGRESS", + "RUNNING", + "REBOOT_IN_PROGRESS", + "CRITICAL_ACTION_REQUIRED", + "REPLICA" + ] + }, + "BrokerStorageType": { + "type": "string", + "documentation": "

The broker's storage type.

EFS is not supported for RabbitMQ engine type.

", + "enum": [ + "EBS", + "EFS" + ] + }, + "BrokerSummary": { + "type": "structure", + "members": { + "BrokerArn": { + "shape": "__string", + "locationName": "brokerArn", + "documentation": "

The broker's Amazon Resource Name (ARN).

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "BrokerName": { + "shape": "__string", + "locationName": "brokerName", + "documentation": "

The broker's name. This value is unique in your Amazon Web Services account, 1-50 characters long, and containing only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" + }, + "BrokerState": { + "shape": "BrokerState", + "locationName": "brokerState", + "documentation": "

The broker's status.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

The time when the broker was created.

" + }, + "DeploymentMode": { + "shape": "DeploymentMode", + "locationName": "deploymentMode", + "documentation": "

The broker's deployment mode.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

The type of broker engine.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's instance type.

" + } + }, + "documentation": "

Returns information about all brokers.

", + "required": [ + "DeploymentMode", + "EngineType" + ] + }, + "ChangeType": { + "type": "string", + "documentation": "

The type of change pending for the ActiveMQ user.

", + "enum": [ + "CREATE", + "UPDATE", + "DELETE" + ] + }, + "Configuration": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Required. The ARN of the configuration.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration revision.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

Required. The description of the configuration.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For a list of supported engine versions, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

Required. The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The list of all tags associated with this configuration.

" + } + }, + "documentation": "

Returns information about all configurations.

", + "required": [ + "Description", + "EngineVersion", + "LatestRevision", + "AuthenticationStrategy", + "EngineType", + "Id", + "Arn", + "Name", + "Created" + ] + }, + "ConfigurationId": { + "type": "structure", + "members": { + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "Revision": { + "shape": "__integer", + "locationName": "revision", + "documentation": "

The revision number of the configuration.

" + } + }, + "documentation": "

A list of information about the configuration.

", + "required": [ + "Id" + ] + }, + "ConfigurationRevision": { + "type": "structure", + "members": { + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration revision.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the configuration revision.

" + }, + "Revision": { + "shape": "__integer", + "locationName": "revision", + "documentation": "

Required. The revision number of the configuration.

" + } + }, + "documentation": "

Returns information about the specified configuration revision.

", + "required": [ + "Revision", + "Created" + ] + }, + "Configurations": { + "type": "structure", + "members": { + "Current": { + "shape": "ConfigurationId", + "locationName": "current", + "documentation": "

The broker's current configuration.

" + }, + "History": { + "shape": "__listOfConfigurationId", + "locationName": "history", + "documentation": "

The history of configurations applied to the broker.

" + }, + "Pending": { + "shape": "ConfigurationId", + "locationName": "pending", + "documentation": "

The broker's pending configuration.

" + } + }, + "documentation": "

Broker configuration information.

" + }, + "ConflictException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 409 } }, - "CreateBrokerInput" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. Set to true by default, if no value is specified.

" - }, - "BrokerName" : { - "shape" : "__string", - "locationName" : "brokerName", - "documentation" : "

Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

A list of information about the configuration.

" - }, - "CreatorRequestId" : { - "shape" : "__string", - "locationName" : "creatorRequestId", - "documentation" : "

The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

", - "idempotencyToken" : true - }, - "DeploymentMode" : { - "shape" : "DeploymentMode", - "locationName" : "deploymentMode", - "documentation" : "

Required. The broker's deployment mode.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Defines whether this broker is a part of a data replication pair.

" - }, - "DataReplicationPrimaryBrokerArn" : { - "shape" : "__string", - "locationName" : "dataReplicationPrimaryBrokerArn", - "documentation" : "

The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

" - }, - "EncryptionOptions" : { - "shape" : "EncryptionOptions", - "locationName" : "encryptionOptions", - "documentation" : "

Encryption options for the broker.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

Required. The broker's instance type.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataInput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

Enables Amazon CloudWatch logging for brokers.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "PubliclyAccessible" : { - "shape" : "__boolean", - "locationName" : "publiclyAccessible", - "documentation" : "

Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" - }, - "StorageType" : { - "shape" : "BrokerStorageType", - "locationName" : "storageType", - "documentation" : "

The broker's storage type.

" - }, - "SubnetIds" : { - "shape" : "__listOf__string", - "locationName" : "subnetIds", - "documentation" : "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

Create tags when creating the broker.

" - }, - "Users" : { - "shape" : "__listOfUser", - "locationName" : "users", - "documentation" : "

The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

" - } - }, - "documentation" : "

Creates a broker.

", - "required" : [ "EngineVersion", "HostInstanceType", "AutoMinorVersionUpgrade", "Users", "BrokerName", "DeploymentMode", "EngineType", "PubliclyAccessible" ] - }, - "CreateBrokerOutput" : { - "type" : "structure", - "members" : { - "BrokerArn" : { - "shape" : "__string", - "locationName" : "brokerArn", - "documentation" : "

The broker's Amazon Resource Name (ARN).

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } - }, - "documentation" : "

Returns information about the created broker.

" - }, - "CreateBrokerRequest" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. Set to true by default, if no value is specified.

" - }, - "BrokerName" : { - "shape" : "__string", - "locationName" : "brokerName", - "documentation" : "

Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

A list of information about the configuration.

" - }, - "CreatorRequestId" : { - "shape" : "__string", - "locationName" : "creatorRequestId", - "documentation" : "

The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

", - "idempotencyToken" : true - }, - "DeploymentMode" : { - "shape" : "DeploymentMode", - "locationName" : "deploymentMode", - "documentation" : "

Required. The broker's deployment mode.

" - }, - "EncryptionOptions" : { - "shape" : "EncryptionOptions", - "locationName" : "encryptionOptions", - "documentation" : "

Encryption options for the broker.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

Required. The broker's instance type.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataInput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

Enables Amazon CloudWatch logging for brokers.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "PubliclyAccessible" : { - "shape" : "__boolean", - "locationName" : "publiclyAccessible", - "documentation" : "

Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" - }, - "StorageType" : { - "shape" : "BrokerStorageType", - "locationName" : "storageType", - "documentation" : "

The broker's storage type.

" - }, - "SubnetIds" : { - "shape" : "__listOf__string", - "locationName" : "subnetIds", - "documentation" : "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

Create tags when creating the broker.

" - }, - "Users" : { - "shape" : "__listOfUser", - "locationName" : "users", - "documentation" : "

The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Defines whether this broker is a part of a data replication pair.

" - }, - "DataReplicationPrimaryBrokerArn" : { - "shape" : "__string", - "locationName" : "dataReplicationPrimaryBrokerArn", - "documentation" : "

The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

" - } - }, - "documentation" : "

Creates a broker using the specified properties.

", - "required" : [ "EngineVersion", "HostInstanceType", "AutoMinorVersionUpgrade", "Users", "BrokerName", "DeploymentMode", "EngineType", "PubliclyAccessible" ] - }, - "CreateBrokerResponse" : { - "type" : "structure", - "members" : { - "BrokerArn" : { - "shape" : "__string", - "locationName" : "brokerArn", - "documentation" : "

The broker's Amazon Resource Name (ARN).

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" + "CreateBrokerInput": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot. Set to true by default, if no value is specified.

Must be set to true for ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above.

" + }, + "BrokerName": { + "shape": "__string", + "locationName": "brokerName", + "documentation": "

Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

A list of information about the configuration.

" + }, + "CreatorRequestId": { + "shape": "__string", + "locationName": "creatorRequestId", + "documentation": "

The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

", + "idempotencyToken": true + }, + "DeploymentMode": { + "shape": "DeploymentMode", + "locationName": "deploymentMode", + "documentation": "

Required. The broker's deployment mode.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Defines whether this broker is a part of a data replication pair.

" + }, + "DataReplicationPrimaryBrokerArn": { + "shape": "__string", + "locationName": "dataReplicationPrimaryBrokerArn", + "documentation": "

The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

" + }, + "EncryptionOptions": { + "shape": "EncryptionOptions", + "locationName": "encryptionOptions", + "documentation": "

Encryption options for the broker.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

Required. The broker's instance type.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataInput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

Enables Amazon CloudWatch logging for brokers.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "PubliclyAccessible": { + "shape": "__boolean", + "locationName": "publiclyAccessible", + "documentation": "

Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" + }, + "StorageType": { + "shape": "BrokerStorageType", + "locationName": "storageType", + "documentation": "

The broker's storage type.

" + }, + "SubnetIds": { + "shape": "__listOf__string", + "locationName": "subnetIds", + "documentation": "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

Create tags when creating the broker.

" + }, + "Users": { + "shape": "__listOfUser", + "locationName": "users", + "documentation": "

The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

" + } + }, + "documentation": "

Creates a broker.

", + "required": [ + "HostInstanceType", + "Users", + "BrokerName", + "DeploymentMode", + "EngineType", + "PubliclyAccessible" + ] + }, + "CreateBrokerOutput": { + "type": "structure", + "members": { + "BrokerArn": { + "shape": "__string", + "locationName": "brokerArn", + "documentation": "

The broker's Amazon Resource Name (ARN).

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + } + }, + "documentation": "

Returns information about the created broker.

" + }, + "CreateBrokerRequest": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot. Set to true by default, if no value is specified.

Must be set to true for ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above.

" + }, + "BrokerName": { + "shape": "__string", + "locationName": "brokerName", + "documentation": "

Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

A list of information about the configuration.

" + }, + "CreatorRequestId": { + "shape": "__string", + "locationName": "creatorRequestId", + "documentation": "

The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

", + "idempotencyToken": true + }, + "DeploymentMode": { + "shape": "DeploymentMode", + "locationName": "deploymentMode", + "documentation": "

Required. The broker's deployment mode.

" + }, + "EncryptionOptions": { + "shape": "EncryptionOptions", + "locationName": "encryptionOptions", + "documentation": "

Encryption options for the broker.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

Required. The broker's instance type.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataInput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

Enables Amazon CloudWatch logging for brokers.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "PubliclyAccessible": { + "shape": "__boolean", + "locationName": "publiclyAccessible", + "documentation": "

Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" + }, + "StorageType": { + "shape": "BrokerStorageType", + "locationName": "storageType", + "documentation": "

The broker's storage type.

" + }, + "SubnetIds": { + "shape": "__listOf__string", + "locationName": "subnetIds", + "documentation": "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

Create tags when creating the broker.

" + }, + "Users": { + "shape": "__listOfUser", + "locationName": "users", + "documentation": "

The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Defines whether this broker is a part of a data replication pair.

" + }, + "DataReplicationPrimaryBrokerArn": { + "shape": "__string", + "locationName": "dataReplicationPrimaryBrokerArn", + "documentation": "

The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

" + } + }, + "documentation": "

Creates a broker using the specified properties.

", + "required": [ + "HostInstanceType", + "Users", + "BrokerName", + "DeploymentMode", + "EngineType", + "PubliclyAccessible" + ] + }, + "CreateBrokerResponse": { + "type": "structure", + "members": { + "BrokerArn": { + "shape": "__string", + "locationName": "brokerArn", + "documentation": "

The broker's Amazon Resource Name (ARN).

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" } } }, - "CreateConfigurationInput" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

Create tags when creating the configuration.

" - } - }, - "documentation" : "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

", - "required" : [ "EngineVersion", "EngineType", "Name" ] - }, - "CreateConfigurationOutput" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

Required. The Amazon Resource Name (ARN) of the configuration.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - } - }, - "documentation" : "

Returns information about the created configuration.

", - "required" : [ "AuthenticationStrategy", "Id", "Arn", "Name", "Created" ] - }, - "CreateConfigurationRequest" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

Create tags when creating the configuration.

" - } - }, - "documentation" : "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

", - "required" : [ "EngineVersion", "EngineType", "Name" ] - }, - "CreateConfigurationResponse" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

Required. The Amazon Resource Name (ARN) of the configuration.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + "CreateConfigurationInput": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

Create tags when creating the configuration.

" + } + }, + "documentation": "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

", + "required": [ + "EngineType", + "Name" + ] + }, + "CreateConfigurationOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Required. The Amazon Resource Name (ARN) of the configuration.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + } + }, + "documentation": "

Returns information about the created configuration.

", + "required": [ + "AuthenticationStrategy", + "Id", + "Arn", + "Name", + "Created" + ] + }, + "CreateConfigurationRequest": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

Create tags when creating the configuration.

" + } + }, + "documentation": "

Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version).

", + "required": [ + "EngineType", + "Name" + ] + }, + "CreateConfigurationResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Required. The Amazon Resource Name (ARN) of the configuration.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" } } }, - "CreateTagsRequest" : { - "type" : "structure", - "members" : { - "ResourceArn" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "resource-arn", - "documentation" : "

The Amazon Resource Name (ARN) of the resource tag.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The key-value pair for the resource tag.

" - } - }, - "documentation" : "

A map of the key-value pairs for the resource tag.

", - "required" : [ "ResourceArn" ] - }, - "CreateUserInput" : { - "type" : "structure", - "members" : { - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Password" : { - "shape" : "__string", - "locationName" : "password", - "documentation" : "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Defines if this user is intended for CRDR replication purposes.

" - } - }, - "documentation" : "

Creates a new ActiveMQ user.

", - "required" : [ "Password" ] - }, - "CreateUserRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Password" : { - "shape" : "__string", - "locationName" : "password", - "documentation" : "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" - }, - "Username" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "username", - "documentation" : "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Defines if this user is intended for CRDR replication purposes.

" - } - }, - "documentation" : "

Creates a new ActiveMQ user.

", - "required" : [ "Username", "BrokerId", "Password" ] - }, - "CreateUserResponse" : { - "type" : "structure", - "members" : { } - }, - "DataReplicationCounterpart" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique broker id generated by Amazon MQ.

" - }, - "Region" : { - "shape" : "__string", - "locationName" : "region", - "documentation" : "

Required. The region of the broker.

" - } - }, - "documentation" : "

Specifies a broker in a data replication pair.

", - "required" : [ "BrokerId", "Region" ] - }, - "DataReplicationMetadataOutput" : { - "type" : "structure", - "members" : { - "DataReplicationCounterpart" : { - "shape" : "DataReplicationCounterpart", - "locationName" : "dataReplicationCounterpart", - "documentation" : "

Describes the replica/primary broker. Only returned if this broker is currently set as a primary or replica in the broker's dataReplicationRole property.

" - }, - "DataReplicationRole" : { - "shape" : "__string", - "locationName" : "dataReplicationRole", - "documentation" : "

Defines the role of this broker in a data replication pair. When a replica broker is promoted to primary, this role is interchanged.

" - } - }, - "documentation" : "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode or pendingDataReplicationMode is set to CRDR.

", - "required" : [ "DataReplicationRole" ] - }, - "DataReplicationMode" : { - "type" : "string", - "documentation" : "

Specifies whether a broker is a part of a data replication pair.

", - "enum" : [ "NONE", "CRDR" ] - }, - "DayOfWeek" : { - "type" : "string", - "enum" : [ "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY" ] - }, - "DeleteBrokerOutput" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } - }, - "documentation" : "

Returns information about the deleted broker.

" - }, - "DeleteBrokerRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } - }, - "required" : [ "BrokerId" ] - }, - "DeleteBrokerResponse" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" + "CreateTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

The Amazon Resource Name (ARN) of the resource tag.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The key-value pair for the resource tag.

" + } + }, + "documentation": "

A map of the key-value pairs for the resource tag.

", + "required": [ + "ResourceArn" + ] + }, + "CreateUserInput": { + "type": "structure", + "members": { + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Password": { + "shape": "__string", + "locationName": "password", + "documentation": "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Defines if this user is intended for CRDR replication purposes.

" + } + }, + "documentation": "

Creates a new ActiveMQ user.

", + "required": [ + "Password" + ] + }, + "CreateUserRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Password": { + "shape": "__string", + "locationName": "password", + "documentation": "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" + }, + "Username": { + "shape": "__string", + "location": "uri", + "locationName": "username", + "documentation": "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Defines if this user is intended for CRDR replication purposes.

" + } + }, + "documentation": "

Creates a new ActiveMQ user.

", + "required": [ + "Username", + "BrokerId", + "Password" + ] + }, + "CreateUserResponse": { + "type": "structure", + "members": { + } + }, + "DataReplicationCounterpart": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique broker id generated by Amazon MQ.

" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "

Required. The region of the broker.

" + } + }, + "documentation": "

Specifies a broker in a data replication pair.

", + "required": [ + "BrokerId", + "Region" + ] + }, + "DataReplicationMetadataOutput": { + "type": "structure", + "members": { + "DataReplicationCounterpart": { + "shape": "DataReplicationCounterpart", + "locationName": "dataReplicationCounterpart", + "documentation": "

Describes the replica/primary broker. Only returned if this broker is currently set as a primary or replica in the broker's dataReplicationRole property.

" + }, + "DataReplicationRole": { + "shape": "__string", + "locationName": "dataReplicationRole", + "documentation": "

Defines the role of this broker in a data replication pair. When a replica broker is promoted to primary, this role is interchanged.

" + } + }, + "documentation": "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode or pendingDataReplicationMode is set to CRDR.

", + "required": [ + "DataReplicationRole" + ] + }, + "DataReplicationMode": { + "type": "string", + "documentation": "

Specifies whether a broker is a part of a data replication pair.

", + "enum": [ + "NONE", + "CRDR" + ] + }, + "DayOfWeek": { + "type": "string", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] + }, + "DeleteBrokerOutput": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + } + }, + "documentation": "

Returns information about the deleted broker.

" + }, + "DeleteBrokerRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + } + }, + "required": [ + "BrokerId" + ] + }, + "DeleteBrokerResponse": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" } } }, - "DeleteTagsRequest" : { - "type" : "structure", - "members" : { - "ResourceArn" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "resource-arn", - "documentation" : "

The Amazon Resource Name (ARN) of the resource tag.

" - }, - "TagKeys" : { - "shape" : "__listOf__string", - "location" : "querystring", - "locationName" : "tagKeys", - "documentation" : "

An array of tag keys to delete

" - } - }, - "required" : [ "TagKeys", "ResourceArn" ] - }, - "DeleteUserRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "Username" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "username", - "documentation" : "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - } - }, - "required" : [ "Username", "BrokerId" ] - }, - "DeleteUserResponse" : { - "type" : "structure", - "members" : { } - }, - "DeploymentMode" : { - "type" : "string", - "documentation" : "

The broker's deployment mode.

", - "enum" : [ "SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ" ] - }, - "DescribeBrokerEngineTypesRequest" : { - "type" : "structure", - "members" : { - "EngineType" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "engineType", - "documentation" : "

Filter response by engine type.

" - }, - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "DeleteTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

The Amazon Resource Name (ARN) of the resource tag.

" + }, + "TagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "

An array of tag keys to delete

" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "DeleteUserRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "Username": { + "shape": "__string", + "location": "uri", + "locationName": "username", + "documentation": "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + } + }, + "required": [ + "Username", + "BrokerId" + ] + }, + "DeleteUserResponse": { + "type": "structure", + "members": { + } + }, + "DeploymentMode": { + "type": "string", + "documentation": "

The broker's deployment mode.

", + "enum": [ + "SINGLE_INSTANCE", + "ACTIVE_STANDBY_MULTI_AZ", + "CLUSTER_MULTI_AZ" + ] + }, + "DescribeBrokerEngineTypesRequest": { + "type": "structure", + "members": { + "EngineType": { + "shape": "__string", + "location": "querystring", + "locationName": "engineType", + "documentation": "

Filter response by engine type.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "DescribeBrokerEngineTypesResponse" : { - "type" : "structure", - "members" : { - "BrokerEngineTypes" : { - "shape" : "__listOfBrokerEngineType", - "locationName" : "brokerEngineTypes", - "documentation" : "

List of available engine types and versions.

" + "DescribeBrokerEngineTypesResponse": { + "type": "structure", + "members": { + "BrokerEngineTypes": { + "shape": "__listOfBrokerEngineType", + "locationName": "brokerEngineTypes", + "documentation": "

List of available engine types and versions.

" }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of engine types that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of engine types that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "DescribeBrokerInstanceOptionsRequest" : { - "type" : "structure", - "members" : { - "EngineType" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "engineType", - "documentation" : "

Filter response by engine type.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "hostInstanceType", - "documentation" : "

Filter response by host instance type.

" - }, - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - }, - "StorageType" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "storageType", - "documentation" : "

Filter response by storage type.

" + "DescribeBrokerInstanceOptionsRequest": { + "type": "structure", + "members": { + "EngineType": { + "shape": "__string", + "location": "querystring", + "locationName": "engineType", + "documentation": "

Filter response by engine type.

" + }, + "HostInstanceType": { + "shape": "__string", + "location": "querystring", + "locationName": "hostInstanceType", + "documentation": "

Filter response by host instance type.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + }, + "StorageType": { + "shape": "__string", + "location": "querystring", + "locationName": "storageType", + "documentation": "

Filter response by storage type.

" } } }, - "DescribeBrokerInstanceOptionsResponse" : { - "type" : "structure", - "members" : { - "BrokerInstanceOptions" : { - "shape" : "__listOfBrokerInstanceOption", - "locationName" : "brokerInstanceOptions", - "documentation" : "

List of available broker instance options.

" + "DescribeBrokerInstanceOptionsResponse": { + "type": "structure", + "members": { + "BrokerInstanceOptions": { + "shape": "__listOfBrokerInstanceOption", + "locationName": "brokerInstanceOptions", + "documentation": "

List of available broker instance options.

" }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of instance options that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of instance options that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "DescribeBrokerOutput" : { - "type" : "structure", - "members" : { - "ActionsRequired" : { - "shape" : "__listOfActionRequired", - "locationName" : "actionsRequired", - "documentation" : "

Actions required for a broker.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.

" - }, - "BrokerArn" : { - "shape" : "__string", - "locationName" : "brokerArn", - "documentation" : "

The broker's Amazon Resource Name (ARN).

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "BrokerInstances" : { - "shape" : "__listOfBrokerInstance", - "locationName" : "brokerInstances", - "documentation" : "

A list of information about allocated brokers.

" - }, - "BrokerName" : { - "shape" : "__string", - "locationName" : "brokerName", - "documentation" : "

The broker's name. This value must be unique in your Amazon Web Services account account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" - }, - "BrokerState" : { - "shape" : "BrokerState", - "locationName" : "brokerState", - "documentation" : "

The broker's status.

" - }, - "Configurations" : { - "shape" : "Configurations", - "locationName" : "configurations", - "documentation" : "

The list of all revisions for the specified configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

The time when the broker was created.

" - }, - "DeploymentMode" : { - "shape" : "DeploymentMode", - "locationName" : "deploymentMode", - "documentation" : "

The broker's deployment mode.

" - }, - "DataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "dataReplicationMetadata", - "documentation" : "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Describes whether this broker is a part of a data replication pair.

" - }, - "EncryptionOptions" : { - "shape" : "EncryptionOptions", - "locationName" : "encryptionOptions", - "documentation" : "

Encryption options for the broker.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's instance type.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "ldapServerMetadata", - "documentation" : "

The metadata of the LDAP server used to authenticate and authorize connections to the broker.

" - }, - "Logs" : { - "shape" : "LogsSummary", - "locationName" : "logs", - "documentation" : "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "PendingAuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "pendingAuthenticationStrategy", - "documentation" : "

The authentication strategy that will be applied when the broker is rebooted. The default is SIMPLE.

" - }, - "PendingDataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "pendingDataReplicationMetadata", - "documentation" : "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" - }, - "PendingDataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "pendingDataReplicationMode", - "documentation" : "

Describes whether this broker will be a part of a data replication pair after reboot.

" - }, - "PendingEngineVersion" : { - "shape" : "__string", - "locationName" : "pendingEngineVersion", - "documentation" : "

The broker engine version to upgrade to. For a list of supported engine versions, see Supported engines.

" - }, - "PendingHostInstanceType" : { - "shape" : "__string", - "locationName" : "pendingHostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "PendingLdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "pendingLdapServerMetadata", - "documentation" : "

The metadata of the LDAP server that will be used to authenticate and authorize connections to the broker after it is rebooted.

" - }, - "PendingSecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "pendingSecurityGroups", - "documentation" : "

The list of pending security groups to authorize connections to brokers.

" - }, - "PubliclyAccessible" : { - "shape" : "__boolean", - "locationName" : "publiclyAccessible", - "documentation" : "

Enables connections from applications outside of the VPC that hosts the broker's subnets.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" - }, - "StorageType" : { - "shape" : "BrokerStorageType", - "locationName" : "storageType", - "documentation" : "

The broker's storage type.

" - }, - "SubnetIds" : { - "shape" : "__listOf__string", - "locationName" : "subnetIds", - "documentation" : "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The list of all tags associated with this broker.

" - }, - "Users" : { - "shape" : "__listOfUserSummary", - "locationName" : "users", - "documentation" : "

The list of all broker usernames for the specified broker.

" - } - }, - "documentation" : "

Returns information about the specified broker.

", - "required" : [ "DeploymentMode", "EngineType", "AutoMinorVersionUpgrade", "PubliclyAccessible" ] - }, - "DescribeBrokerRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } - }, - "required" : [ "BrokerId" ] - }, - "DescribeBrokerResponse" : { - "type" : "structure", - "members" : { - "ActionsRequired" : { - "shape" : "__listOfActionRequired", - "locationName" : "actionsRequired", - "documentation" : "

Actions required for a broker.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.

" - }, - "BrokerArn" : { - "shape" : "__string", - "locationName" : "brokerArn", - "documentation" : "

The broker's Amazon Resource Name (ARN).

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "BrokerInstances" : { - "shape" : "__listOfBrokerInstance", - "locationName" : "brokerInstances", - "documentation" : "

A list of information about allocated brokers.

" - }, - "BrokerName" : { - "shape" : "__string", - "locationName" : "brokerName", - "documentation" : "

The broker's name. This value must be unique in your Amazon Web Services account account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" - }, - "BrokerState" : { - "shape" : "BrokerState", - "locationName" : "brokerState", - "documentation" : "

The broker's status.

" - }, - "Configurations" : { - "shape" : "Configurations", - "locationName" : "configurations", - "documentation" : "

The list of all revisions for the specified configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

The time when the broker was created.

" - }, - "DeploymentMode" : { - "shape" : "DeploymentMode", - "locationName" : "deploymentMode", - "documentation" : "

The broker's deployment mode.

" - }, - "EncryptionOptions" : { - "shape" : "EncryptionOptions", - "locationName" : "encryptionOptions", - "documentation" : "

Encryption options for the broker.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine's version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's instance type.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "ldapServerMetadata", - "documentation" : "

The metadata of the LDAP server used to authenticate and authorize connections to the broker.

" - }, - "Logs" : { - "shape" : "LogsSummary", - "locationName" : "logs", - "documentation" : "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "PendingAuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "pendingAuthenticationStrategy", - "documentation" : "

The authentication strategy that will be applied when the broker is rebooted. The default is SIMPLE.

" - }, - "PendingEngineVersion" : { - "shape" : "__string", - "locationName" : "pendingEngineVersion", - "documentation" : "

The broker engine version to upgrade to. For a list of supported engine versions, see Supported engines.

" - }, - "PendingHostInstanceType" : { - "shape" : "__string", - "locationName" : "pendingHostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "PendingLdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "pendingLdapServerMetadata", - "documentation" : "

The metadata of the LDAP server that will be used to authenticate and authorize connections to the broker after it is rebooted.

" - }, - "PendingSecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "pendingSecurityGroups", - "documentation" : "

The list of pending security groups to authorize connections to brokers.

" - }, - "PubliclyAccessible" : { - "shape" : "__boolean", - "locationName" : "publiclyAccessible", - "documentation" : "

Enables connections from applications outside of the VPC that hosts the broker's subnets.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" - }, - "StorageType" : { - "shape" : "BrokerStorageType", - "locationName" : "storageType", - "documentation" : "

The broker's storage type.

" - }, - "SubnetIds" : { - "shape" : "__listOf__string", - "locationName" : "subnetIds", - "documentation" : "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The list of all tags associated with this broker.

" - }, - "Users" : { - "shape" : "__listOfUserSummary", - "locationName" : "users", - "documentation" : "

The list of all broker usernames for the specified broker.

" - }, - "DataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "dataReplicationMetadata", - "documentation" : "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Describes whether this broker is a part of a data replication pair.

" - }, - "PendingDataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "pendingDataReplicationMetadata", - "documentation" : "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" - }, - "PendingDataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "pendingDataReplicationMode", - "documentation" : "

Describes whether this broker will be a part of a data replication pair after reboot.

" + "DescribeBrokerOutput": { + "type": "structure", + "members": { + "ActionsRequired": { + "shape": "__listOfActionRequired", + "locationName": "actionsRequired", + "documentation": "

Actions required for a broker.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

" + }, + "BrokerArn": { + "shape": "__string", + "locationName": "brokerArn", + "documentation": "

The broker's Amazon Resource Name (ARN).

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "BrokerInstances": { + "shape": "__listOfBrokerInstance", + "locationName": "brokerInstances", + "documentation": "

A list of information about allocated brokers.

" + }, + "BrokerName": { + "shape": "__string", + "locationName": "brokerName", + "documentation": "

The broker's name. This value must be unique in your Amazon Web Services account account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" + }, + "BrokerState": { + "shape": "BrokerState", + "locationName": "brokerState", + "documentation": "

The broker's status.

" + }, + "Configurations": { + "shape": "Configurations", + "locationName": "configurations", + "documentation": "

The list of all revisions for the specified configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

The time when the broker was created.

" + }, + "DeploymentMode": { + "shape": "DeploymentMode", + "locationName": "deploymentMode", + "documentation": "

The broker's deployment mode.

" + }, + "DataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "dataReplicationMetadata", + "documentation": "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Describes whether this broker is a part of a data replication pair.

" + }, + "EncryptionOptions": { + "shape": "EncryptionOptions", + "locationName": "encryptionOptions", + "documentation": "

Encryption options for the broker.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's instance type.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "ldapServerMetadata", + "documentation": "

The metadata of the LDAP server used to authenticate and authorize connections to the broker.

" + }, + "Logs": { + "shape": "LogsSummary", + "locationName": "logs", + "documentation": "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "PendingAuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "pendingAuthenticationStrategy", + "documentation": "

The authentication strategy that will be applied when the broker is rebooted. The default is SIMPLE.

" + }, + "PendingDataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "pendingDataReplicationMetadata", + "documentation": "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" + }, + "PendingDataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "pendingDataReplicationMode", + "documentation": "

Describes whether this broker will be a part of a data replication pair after reboot.

" + }, + "PendingEngineVersion": { + "shape": "__string", + "locationName": "pendingEngineVersion", + "documentation": "

The broker engine version to upgrade to. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "PendingHostInstanceType": { + "shape": "__string", + "locationName": "pendingHostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "PendingLdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "pendingLdapServerMetadata", + "documentation": "

The metadata of the LDAP server that will be used to authenticate and authorize connections to the broker after it is rebooted.

" + }, + "PendingSecurityGroups": { + "shape": "__listOf__string", + "locationName": "pendingSecurityGroups", + "documentation": "

The list of pending security groups to authorize connections to brokers.

" + }, + "PubliclyAccessible": { + "shape": "__boolean", + "locationName": "publiclyAccessible", + "documentation": "

Enables connections from applications outside of the VPC that hosts the broker's subnets.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" + }, + "StorageType": { + "shape": "BrokerStorageType", + "locationName": "storageType", + "documentation": "

The broker's storage type.

" + }, + "SubnetIds": { + "shape": "__listOf__string", + "locationName": "subnetIds", + "documentation": "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The list of all tags associated with this broker.

" + }, + "Users": { + "shape": "__listOfUserSummary", + "locationName": "users", + "documentation": "

The list of all broker usernames for the specified broker.

" + } + }, + "documentation": "

Returns information about the specified broker.

", + "required": [ + "DeploymentMode", + "EngineType", + "AutoMinorVersionUpgrade", + "PubliclyAccessible" + ] + }, + "DescribeBrokerRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + } + }, + "required": [ + "BrokerId" + ] + }, + "DescribeBrokerResponse": { + "type": "structure", + "members": { + "ActionsRequired": { + "shape": "__listOfActionRequired", + "locationName": "actionsRequired", + "documentation": "

Actions required for a broker.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

" + }, + "BrokerArn": { + "shape": "__string", + "locationName": "brokerArn", + "documentation": "

The broker's Amazon Resource Name (ARN).

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "BrokerInstances": { + "shape": "__listOfBrokerInstance", + "locationName": "brokerInstances", + "documentation": "

A list of information about allocated brokers.

" + }, + "BrokerName": { + "shape": "__string", + "locationName": "brokerName", + "documentation": "

The broker's name. This value must be unique in your Amazon Web Services account account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

" + }, + "BrokerState": { + "shape": "BrokerState", + "locationName": "brokerState", + "documentation": "

The broker's status.

" + }, + "Configurations": { + "shape": "Configurations", + "locationName": "configurations", + "documentation": "

The list of all revisions for the specified configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

The time when the broker was created.

" + }, + "DeploymentMode": { + "shape": "DeploymentMode", + "locationName": "deploymentMode", + "documentation": "

The broker's deployment mode.

" + }, + "EncryptionOptions": { + "shape": "EncryptionOptions", + "locationName": "encryptionOptions", + "documentation": "

Encryption options for the broker.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's instance type.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "ldapServerMetadata", + "documentation": "

The metadata of the LDAP server used to authenticate and authorize connections to the broker.

" + }, + "Logs": { + "shape": "LogsSummary", + "locationName": "logs", + "documentation": "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "PendingAuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "pendingAuthenticationStrategy", + "documentation": "

The authentication strategy that will be applied when the broker is rebooted. The default is SIMPLE.

" + }, + "PendingEngineVersion": { + "shape": "__string", + "locationName": "pendingEngineVersion", + "documentation": "

The broker engine version to upgrade to. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "PendingHostInstanceType": { + "shape": "__string", + "locationName": "pendingHostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "PendingLdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "pendingLdapServerMetadata", + "documentation": "

The metadata of the LDAP server that will be used to authenticate and authorize connections to the broker after it is rebooted.

" + }, + "PendingSecurityGroups": { + "shape": "__listOf__string", + "locationName": "pendingSecurityGroups", + "documentation": "

The list of pending security groups to authorize connections to brokers.

" + }, + "PubliclyAccessible": { + "shape": "__boolean", + "locationName": "publiclyAccessible", + "documentation": "

Enables connections from applications outside of the VPC that hosts the broker's subnets.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of rules (1 minimum, 125 maximum) that authorize connections to brokers.

" + }, + "StorageType": { + "shape": "BrokerStorageType", + "locationName": "storageType", + "documentation": "

The broker's storage type.

" + }, + "SubnetIds": { + "shape": "__listOf__string", + "locationName": "subnetIds", + "documentation": "

The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The list of all tags associated with this broker.

" + }, + "Users": { + "shape": "__listOfUserSummary", + "locationName": "users", + "documentation": "

The list of all broker usernames for the specified broker.

" + }, + "DataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "dataReplicationMetadata", + "documentation": "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Describes whether this broker is a part of a data replication pair.

" + }, + "PendingDataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "pendingDataReplicationMetadata", + "documentation": "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" + }, + "PendingDataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "pendingDataReplicationMode", + "documentation": "

Describes whether this broker will be a part of a data replication pair after reboot.

" } } }, - "DescribeConfigurationRequest" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "configuration-id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - } - }, - "required" : [ "ConfigurationId" ] - }, - "DescribeConfigurationResponse" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

Required. The ARN of the configuration.

" - }, - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration revision.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

Required. The description of the configuration.

" - }, - "EngineType" : { - "shape" : "EngineType", - "locationName" : "engineType", - "documentation" : "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

Required. The broker engine's version. For a list of supported engine versions, see, Supported engines.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

Required. The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The list of all tags associated with this configuration.

" + "DescribeConfigurationRequest": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "location": "uri", + "locationName": "configuration-id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + } + }, + "required": [ + "ConfigurationId" + ] + }, + "DescribeConfigurationResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Required. The ARN of the configuration.

" + }, + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy associated with the configuration. The default is SIMPLE.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration revision.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

Required. The description of the configuration.

" + }, + "EngineType": { + "shape": "EngineType", + "locationName": "engineType", + "documentation": "

Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. Defaults to the latest available version for the specified broker engine type. For a list of supported engine versions, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

Required. The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The list of all tags associated with this configuration.

" } } }, - "DescribeConfigurationRevisionOutput" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "locationName" : "configurationId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Data" : { - "shape" : "__string", - "locationName" : "data", - "documentation" : "

Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

The description of the configuration.

" - } - }, - "documentation" : "

Returns the specified configuration revision for the specified configuration.

", - "required" : [ "Data", "ConfigurationId", "Created" ] - }, - "DescribeConfigurationRevisionRequest" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "configuration-id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "ConfigurationRevision" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "configuration-revision", - "documentation" : "

The revision of the configuration.

" - } - }, - "required" : [ "ConfigurationRevision", "ConfigurationId" ] - }, - "DescribeConfigurationRevisionResponse" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "locationName" : "configurationId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Data" : { - "shape" : "__string", - "locationName" : "data", - "documentation" : "

Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

The description of the configuration.

" + "DescribeConfigurationRevisionOutput": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "locationName": "configurationId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Data": { + "shape": "__string", + "locationName": "data", + "documentation": "

Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the configuration.

" + } + }, + "documentation": "

Returns the specified configuration revision for the specified configuration.

", + "required": [ + "Data", + "ConfigurationId", + "Created" + ] + }, + "DescribeConfigurationRevisionRequest": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "location": "uri", + "locationName": "configuration-id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "ConfigurationRevision": { + "shape": "__string", + "location": "uri", + "locationName": "configuration-revision", + "documentation": "

The revision of the configuration.

" + } + }, + "required": [ + "ConfigurationRevision", + "ConfigurationId" + ] + }, + "DescribeConfigurationRevisionResponse": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "locationName": "configurationId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Data": { + "shape": "__string", + "locationName": "data", + "documentation": "

Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the configuration.

" } } }, - "DescribeUserOutput" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Pending" : { - "shape" : "UserPendingChanges", - "locationName" : "pending", - "documentation" : "

The status of the changes pending for the ActiveMQ user.

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Describes whether the user is intended for data replication

" - }, - "Username" : { - "shape" : "__string", - "locationName" : "username", - "documentation" : "

Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - } - }, - "documentation" : "

Returns information about an ActiveMQ user.

", - "required" : [ "Username", "BrokerId" ] - }, - "DescribeUserRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "Username" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "username", - "documentation" : "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - } - }, - "required" : [ "Username", "BrokerId" ] - }, - "DescribeUserResponse" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Pending" : { - "shape" : "UserPendingChanges", - "locationName" : "pending", - "documentation" : "

The status of the changes pending for the ActiveMQ user.

" - }, - "Username" : { - "shape" : "__string", - "locationName" : "username", - "documentation" : "

Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Describes whether the user is intended for data replication

" + "DescribeUserOutput": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Pending": { + "shape": "UserPendingChanges", + "locationName": "pending", + "documentation": "

The status of the changes pending for the ActiveMQ user.

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Describes whether the user is intended for data replication

" + }, + "Username": { + "shape": "__string", + "locationName": "username", + "documentation": "

Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + } + }, + "documentation": "

Returns information about an ActiveMQ user.

", + "required": [ + "Username", + "BrokerId" + ] + }, + "DescribeUserRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "Username": { + "shape": "__string", + "location": "uri", + "locationName": "username", + "documentation": "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + } + }, + "required": [ + "Username", + "BrokerId" + ] + }, + "DescribeUserResponse": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Pending": { + "shape": "UserPendingChanges", + "locationName": "pending", + "documentation": "

The status of the changes pending for the ActiveMQ user.

" + }, + "Username": { + "shape": "__string", + "locationName": "username", + "documentation": "

Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Describes whether the user is intended for data replication

" } } }, - "EncryptionOptions" : { - "type" : "structure", - "members" : { - "KmsKeyId" : { - "shape" : "__string", - "locationName" : "kmsKeyId", - "documentation" : "

The customer master key (CMK) to use for the A KMS (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.

" - }, - "UseAwsOwnedKey" : { - "shape" : "__boolean", - "locationName" : "useAwsOwnedKey", - "documentation" : "

Enables the use of an Amazon Web Services owned CMK using KMS (KMS). Set to true by default, if no value is provided, for example, for RabbitMQ brokers.

" - } - }, - "documentation" : "

Encryption options for the broker.

", - "required" : [ "UseAwsOwnedKey" ] - }, - "EngineType" : { - "type" : "string", - "documentation" : "

The type of broker engine. Amazon MQ supports ActiveMQ and RabbitMQ.

", - "enum" : [ "ACTIVEMQ", "RABBITMQ" ] - }, - "EngineVersion" : { - "type" : "structure", - "members" : { - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

Id for the version.

" - } - }, - "documentation" : "

Id of the engine version.

" - }, - "Error" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

" - }, - "ForbiddenException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 403 + "EncryptionOptions": { + "type": "structure", + "members": { + "KmsKeyId": { + "shape": "__string", + "locationName": "kmsKeyId", + "documentation": "

The customer master key (CMK) to use for the AWS Key Management Service (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.

" + }, + "UseAwsOwnedKey": { + "shape": "__boolean", + "locationName": "useAwsOwnedKey", + "documentation": "

Enables the use of an Amazon Web Services owned CMK using AWS Key Management Service (KMS). Set to true by default, if no value is provided, for example, for RabbitMQ brokers.

" + } + }, + "documentation": "

Encryption options for the broker.

", + "required": [ + "UseAwsOwnedKey" + ] + }, + "EngineType": { + "type": "string", + "documentation": "

The type of broker engine. Amazon MQ supports ActiveMQ and RabbitMQ.

", + "enum": [ + "ACTIVEMQ", + "RABBITMQ" + ] + }, + "EngineVersion": { + "type": "structure", + "members": { + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Id for the version.

" + } + }, + "documentation": "

Id of the engine version.

" + }, + "Error": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

" + }, + "ForbiddenException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 403 } }, - "InternalServerErrorException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" + "InternalServerErrorException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" } }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 500 + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 500 } }, - "LdapServerMetadataInput" : { - "type" : "structure", - "members" : { - "Hosts" : { - "shape" : "__listOf__string", - "locationName" : "hosts", - "documentation" : "

Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

" - }, - "RoleBase" : { - "shape" : "__string", - "locationName" : "roleBase", - "documentation" : "

The distinguished name of the node in the directory information tree (DIT) to search for roles or groups. For example, ou=group, ou=corp, dc=corp,\n dc=example, dc=com.

" - }, - "RoleName" : { - "shape" : "__string", - "locationName" : "roleName", - "documentation" : "

Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.

" - }, - "RoleSearchMatching" : { - "shape" : "__string", - "locationName" : "roleSearchMatching", - "documentation" : "

The LDAP search filter used to find roles within the roleBase. The distinguished name of the user matched by userSearchMatching is substituted into the {0} placeholder in the search filter. The client's username is substituted into the {1} placeholder. For example, if you set this option to (member=uid={1})for the user janedoe, the search filter becomes (member=uid=janedoe) after string substitution. It matches all role entries that have a member attribute equal to uid=janedoe under the subtree selected by the roleBase.

" - }, - "RoleSearchSubtree" : { - "shape" : "__boolean", - "locationName" : "roleSearchSubtree", - "documentation" : "

The directory search scope for the role. If set to true, scope is to search the entire subtree.

" - }, - "ServiceAccountPassword" : { - "shape" : "__string", - "locationName" : "serviceAccountPassword", - "documentation" : "

Service account password. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" - }, - "ServiceAccountUsername" : { - "shape" : "__string", - "locationName" : "serviceAccountUsername", - "documentation" : "

Service account username. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" - }, - "UserBase" : { - "shape" : "__string", - "locationName" : "userBase", - "documentation" : "

Select a particular subtree of the directory information tree (DIT) to search for user entries. The subtree is specified by a DN, which specifies the base node of the subtree. For example, by setting this option to ou=Users,ou=corp, dc=corp,\n dc=example, dc=com, the search for user entries is restricted to the subtree beneath ou=Users, ou=corp, dc=corp, dc=example, dc=com.

" - }, - "UserRoleName" : { - "shape" : "__string", - "locationName" : "userRoleName", - "documentation" : "

Specifies the name of the LDAP attribute for the user group membership.

" - }, - "UserSearchMatching" : { - "shape" : "__string", - "locationName" : "userSearchMatching", - "documentation" : "

The LDAP search filter used to find users within the userBase. The client's username is substituted into the {0} placeholder in the search filter. For example, if this option is set to (uid={0}) and the received username is janedoe, the search filter becomes (uid=janedoe) after string substitution. It will result in matching an entry like uid=janedoe, ou=Users,ou=corp, dc=corp, dc=example,\n dc=com.

" - }, - "UserSearchSubtree" : { - "shape" : "__boolean", - "locationName" : "userSearchSubtree", - "documentation" : "

The directory search scope for the user. If set to true, scope is to search the entire subtree.

" - } - }, - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.

Does not apply to RabbitMQ brokers.

", - "required" : [ "Hosts", "UserSearchMatching", "UserBase", "RoleSearchMatching", "ServiceAccountUsername", "RoleBase", "ServiceAccountPassword" ] - }, - "LdapServerMetadataOutput" : { - "type" : "structure", - "members" : { - "Hosts" : { - "shape" : "__listOf__string", - "locationName" : "hosts", - "documentation" : "

Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

" - }, - "RoleBase" : { - "shape" : "__string", - "locationName" : "roleBase", - "documentation" : "

The distinguished name of the node in the directory information tree (DIT) to search for roles or groups. For example, ou=group, ou=corp, dc=corp,\n dc=example, dc=com.

" - }, - "RoleName" : { - "shape" : "__string", - "locationName" : "roleName", - "documentation" : "

Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.

" - }, - "RoleSearchMatching" : { - "shape" : "__string", - "locationName" : "roleSearchMatching", - "documentation" : "

The LDAP search filter used to find roles within the roleBase. The distinguished name of the user matched by userSearchMatching is substituted into the {0} placeholder in the search filter. The client's username is substituted into the {1} placeholder. For example, if you set this option to (member=uid={1})for the user janedoe, the search filter becomes (member=uid=janedoe) after string substitution. It matches all role entries that have a member attribute equal to uid=janedoe under the subtree selected by the roleBase.

" - }, - "RoleSearchSubtree" : { - "shape" : "__boolean", - "locationName" : "roleSearchSubtree", - "documentation" : "

The directory search scope for the role. If set to true, scope is to search the entire subtree.

" - }, - "ServiceAccountUsername" : { - "shape" : "__string", - "locationName" : "serviceAccountUsername", - "documentation" : "

Service account username. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" - }, - "UserBase" : { - "shape" : "__string", - "locationName" : "userBase", - "documentation" : "

Select a particular subtree of the directory information tree (DIT) to search for user entries. The subtree is specified by a DN, which specifies the base node of the subtree. For example, by setting this option to ou=Users,ou=corp, dc=corp,\n dc=example, dc=com, the search for user entries is restricted to the subtree beneath ou=Users, ou=corp, dc=corp, dc=example, dc=com.

" - }, - "UserRoleName" : { - "shape" : "__string", - "locationName" : "userRoleName", - "documentation" : "

Specifies the name of the LDAP attribute for the user group membership.

" - }, - "UserSearchMatching" : { - "shape" : "__string", - "locationName" : "userSearchMatching", - "documentation" : "

The LDAP search filter used to find users within the userBase. The client's username is substituted into the {0} placeholder in the search filter. For example, if this option is set to (uid={0}) and the received username is janedoe, the search filter becomes (uid=janedoe) after string substitution. It will result in matching an entry like uid=janedoe, ou=Users,ou=corp, dc=corp, dc=example,\n dc=com.

" - }, - "UserSearchSubtree" : { - "shape" : "__boolean", - "locationName" : "userSearchSubtree", - "documentation" : "

The directory search scope for the user. If set to true, scope is to search the entire subtree.

" - } - }, - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.

", - "required" : [ "Hosts", "UserSearchMatching", "UserBase", "RoleSearchMatching", "ServiceAccountUsername", "RoleBase" ] - }, - "ListBrokersOutput" : { - "type" : "structure", - "members" : { - "BrokerSummaries" : { - "shape" : "__listOfBrokerSummary", - "locationName" : "brokerSummaries", - "documentation" : "

A list of information about all brokers.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "documentation" : "

A list of information about all brokers.

" - }, - "ListBrokersRequest" : { - "type" : "structure", - "members" : { - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "LdapServerMetadataInput": { + "type": "structure", + "members": { + "Hosts": { + "shape": "__listOf__string", + "locationName": "hosts", + "documentation": "

Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

" + }, + "RoleBase": { + "shape": "__string", + "locationName": "roleBase", + "documentation": "

The distinguished name of the node in the directory information tree (DIT) to search for roles or groups. For example, ou=group, ou=corp, dc=corp,\n dc=example, dc=com.

" + }, + "RoleName": { + "shape": "__string", + "locationName": "roleName", + "documentation": "

Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.

" + }, + "RoleSearchMatching": { + "shape": "__string", + "locationName": "roleSearchMatching", + "documentation": "

The LDAP search filter used to find roles within the roleBase. The distinguished name of the user matched by userSearchMatching is substituted into the {0} placeholder in the search filter. The client's username is substituted into the {1} placeholder. For example, if you set this option to (member=uid={1}) for the user janedoe, the search filter becomes (member=uid=janedoe) after string substitution. It matches all role entries that have a member attribute equal to uid=janedoe under the subtree selected by the roleBase.

" + }, + "RoleSearchSubtree": { + "shape": "__boolean", + "locationName": "roleSearchSubtree", + "documentation": "

The directory search scope for the role. If set to true, scope is to search the entire subtree.

" + }, + "ServiceAccountPassword": { + "shape": "__string", + "locationName": "serviceAccountPassword", + "documentation": "

Service account password. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" + }, + "ServiceAccountUsername": { + "shape": "__string", + "locationName": "serviceAccountUsername", + "documentation": "

Service account username. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" + }, + "UserBase": { + "shape": "__string", + "locationName": "userBase", + "documentation": "

Select a particular subtree of the directory information tree (DIT) to search for user entries. The subtree is specified by a DN, which specifies the base node of the subtree. For example, by setting this option to ou=Users,ou=corp, dc=corp,\n dc=example, dc=com, the search for user entries is restricted to the subtree beneath ou=Users, ou=corp, dc=corp, dc=example, dc=com.

" + }, + "UserRoleName": { + "shape": "__string", + "locationName": "userRoleName", + "documentation": "

Specifies the name of the LDAP attribute for the user group membership.

" + }, + "UserSearchMatching": { + "shape": "__string", + "locationName": "userSearchMatching", + "documentation": "

The LDAP search filter used to find users within the userBase. The client's username is substituted into the {0} placeholder in the search filter. For example, if this option is set to (uid={0}) and the received username is janedoe, the search filter becomes (uid=janedoe) after string substitution. It will result in matching an entry like uid=janedoe, ou=Users,ou=corp, dc=corp, dc=example,\n dc=com.

" + }, + "UserSearchSubtree": { + "shape": "__boolean", + "locationName": "userSearchSubtree", + "documentation": "

The directory search scope for the user. If set to true, scope is to search the entire subtree.

" + } + }, + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.

Does not apply to RabbitMQ brokers.

", + "required": [ + "Hosts", + "UserSearchMatching", + "UserBase", + "RoleSearchMatching", + "ServiceAccountUsername", + "RoleBase", + "ServiceAccountPassword" + ] + }, + "LdapServerMetadataOutput": { + "type": "structure", + "members": { + "Hosts": { + "shape": "__listOf__string", + "locationName": "hosts", + "documentation": "

Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

" + }, + "RoleBase": { + "shape": "__string", + "locationName": "roleBase", + "documentation": "

The distinguished name of the node in the directory information tree (DIT) to search for roles or groups. For example, ou=group, ou=corp, dc=corp,\n dc=example, dc=com.

" + }, + "RoleName": { + "shape": "__string", + "locationName": "roleName", + "documentation": "

Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.

" + }, + "RoleSearchMatching": { + "shape": "__string", + "locationName": "roleSearchMatching", + "documentation": "

The LDAP search filter used to find roles within the roleBase. The distinguished name of the user matched by userSearchMatching is substituted into the {0} placeholder in the search filter. The client's username is substituted into the {1} placeholder. For example, if you set this option to (member=uid={1}) for the user janedoe, the search filter becomes (member=uid=janedoe) after string substitution. It matches all role entries that have a member attribute equal to uid=janedoe under the subtree selected by the roleBase.

" + }, + "RoleSearchSubtree": { + "shape": "__boolean", + "locationName": "roleSearchSubtree", + "documentation": "

The directory search scope for the role. If set to true, scope is to search the entire subtree.

" + }, + "ServiceAccountUsername": { + "shape": "__string", + "locationName": "serviceAccountUsername", + "documentation": "

Service account username. A service account is an account in your LDAP server that has access to initiate a connection. For example, cn=admin,dc=corp, dc=example,\n dc=com.

" + }, + "UserBase": { + "shape": "__string", + "locationName": "userBase", + "documentation": "

Select a particular subtree of the directory information tree (DIT) to search for user entries. The subtree is specified by a DN, which specifies the base node of the subtree. For example, by setting this option to ou=Users,ou=corp, dc=corp,\n dc=example, dc=com, the search for user entries is restricted to the subtree beneath ou=Users, ou=corp, dc=corp, dc=example, dc=com.

" + }, + "UserRoleName": { + "shape": "__string", + "locationName": "userRoleName", + "documentation": "

Specifies the name of the LDAP attribute for the user group membership.

" + }, + "UserSearchMatching": { + "shape": "__string", + "locationName": "userSearchMatching", + "documentation": "

The LDAP search filter used to find users within the userBase. The client's username is substituted into the {0} placeholder in the search filter. For example, if this option is set to (uid={0}) and the received username is janedoe, the search filter becomes (uid=janedoe) after string substitution. It will result in matching an entry like uid=janedoe, ou=Users,ou=corp, dc=corp, dc=example,\n dc=com.

" + }, + "UserSearchSubtree": { + "shape": "__boolean", + "locationName": "userSearchSubtree", + "documentation": "

The directory search scope for the user. If set to true, scope is to search the entire subtree.

" + } + }, + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.

", + "required": [ + "Hosts", + "UserSearchMatching", + "UserBase", + "RoleSearchMatching", + "ServiceAccountUsername", + "RoleBase" + ] + }, + "ListBrokersOutput": { + "type": "structure", + "members": { + "BrokerSummaries": { + "shape": "__listOfBrokerSummary", + "locationName": "brokerSummaries", + "documentation": "

A list of information about all brokers.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "documentation": "

A list of information about all brokers.

" + }, + "ListBrokersRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "ListBrokersResponse" : { - "type" : "structure", - "members" : { - "BrokerSummaries" : { - "shape" : "__listOfBrokerSummary", - "locationName" : "brokerSummaries", - "documentation" : "

A list of information about all brokers.

" + "ListBrokersResponse": { + "type": "structure", + "members": { + "BrokerSummaries": { + "shape": "__listOfBrokerSummary", + "locationName": "brokerSummaries", + "documentation": "

A list of information about all brokers.

" }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "ListConfigurationRevisionsOutput" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "locationName" : "configurationId", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "MaxResults" : { - "shape" : "__integer", - "locationName" : "maxResults", - "documentation" : "

The maximum number of configuration revisions that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - }, - "Revisions" : { - "shape" : "__listOfConfigurationRevision", - "locationName" : "revisions", - "documentation" : "

The list of all revisions for the specified configuration.

" - } - }, - "documentation" : "

Returns a list of all revisions for the specified configuration.

" - }, - "ListConfigurationRevisionsRequest" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "configuration-id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "required" : [ "ConfigurationId" ] - }, - "ListConfigurationRevisionsResponse" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "locationName" : "configurationId", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "MaxResults" : { - "shape" : "__integer", - "locationName" : "maxResults", - "documentation" : "

The maximum number of configuration revisions that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - }, - "Revisions" : { - "shape" : "__listOfConfigurationRevision", - "locationName" : "revisions", - "documentation" : "

The list of all revisions for the specified configuration.

" + "ListConfigurationRevisionsOutput": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "locationName": "configurationId", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "MaxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of configuration revisions that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + }, + "Revisions": { + "shape": "__listOfConfigurationRevision", + "locationName": "revisions", + "documentation": "

The list of all revisions for the specified configuration.

" + } + }, + "documentation": "

Returns a list of all revisions for the specified configuration.

" + }, + "ListConfigurationRevisionsRequest": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "location": "uri", + "locationName": "configuration-id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "required": [ + "ConfigurationId" + ] + }, + "ListConfigurationRevisionsResponse": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "locationName": "configurationId", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "MaxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of configuration revisions that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + }, + "Revisions": { + "shape": "__listOfConfigurationRevision", + "locationName": "revisions", + "documentation": "

The list of all revisions for the specified configuration.

" } } }, - "ListConfigurationsOutput" : { - "type" : "structure", - "members" : { - "Configurations" : { - "shape" : "__listOfConfiguration", - "locationName" : "configurations", - "documentation" : "

The list of all revisions for the specified configuration.

" - }, - "MaxResults" : { - "shape" : "__integer", - "locationName" : "maxResults", - "documentation" : "

The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "documentation" : "

Returns a list of all configurations.

" - }, - "ListConfigurationsRequest" : { - "type" : "structure", - "members" : { - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "ListConfigurationsOutput": { + "type": "structure", + "members": { + "Configurations": { + "shape": "__listOfConfiguration", + "locationName": "configurations", + "documentation": "

The list of all revisions for the specified configuration.

" + }, + "MaxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "documentation": "

Returns a list of all configurations.

" + }, + "ListConfigurationsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "ListConfigurationsResponse" : { - "type" : "structure", - "members" : { - "Configurations" : { - "shape" : "__listOfConfiguration", - "locationName" : "configurations", - "documentation" : "

The list of all revisions for the specified configuration.

" + "ListConfigurationsResponse": { + "type": "structure", + "members": { + "Configurations": { + "shape": "__listOfConfiguration", + "locationName": "configurations", + "documentation": "

The list of all revisions for the specified configuration.

" }, - "MaxResults" : { - "shape" : "__integer", - "locationName" : "maxResults", - "documentation" : "

The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + "MaxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of configurations that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" } } }, - "ListTagsRequest" : { - "type" : "structure", - "members" : { - "ResourceArn" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "resource-arn", - "documentation" : "

The Amazon Resource Name (ARN) of the resource tag.

" + "ListTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

The Amazon Resource Name (ARN) of the resource tag.

" } }, - "required" : [ "ResourceArn" ] + "required": [ + "ResourceArn" + ] }, - "ListTagsResponse" : { - "type" : "structure", - "members" : { - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The key-value pair for the resource tag.

" + "ListTagsResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The key-value pair for the resource tag.

" } } }, - "ListUsersOutput" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - }, - "Users" : { - "shape" : "__listOfUserSummary", - "locationName" : "users", - "documentation" : "

Required. The list of all ActiveMQ usernames for the specified broker. Does not apply to RabbitMQ brokers.

" - } - }, - "documentation" : "

Returns a list of all ActiveMQ users.

", - "required" : [ "BrokerId", "MaxResults", "Users" ] - }, - "ListUsersRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "MaxResults" : { - "shape" : "MaxResults", - "location" : "querystring", - "locationName" : "maxResults", - "documentation" : "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "location" : "querystring", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - } - }, - "required" : [ "BrokerId" ] - }, - "ListUsersResponse" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "MaxResults" : { - "shape" : "__integerMin5Max100", - "locationName" : "maxResults", - "documentation" : "

Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" - }, - "NextToken" : { - "shape" : "__string", - "locationName" : "nextToken", - "documentation" : "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" - }, - "Users" : { - "shape" : "__listOfUserSummary", - "locationName" : "users", - "documentation" : "

Required. The list of all ActiveMQ usernames for the specified broker. Does not apply to RabbitMQ brokers.

" + "ListUsersOutput": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + }, + "Users": { + "shape": "__listOfUserSummary", + "locationName": "users", + "documentation": "

Required. The list of all ActiveMQ usernames for the specified broker. Does not apply to RabbitMQ brokers.

" + } + }, + "documentation": "

Returns a list of all ActiveMQ users.

", + "required": [ + "BrokerId", + "MaxResults", + "Users" + ] + }, + "ListUsersRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of brokers that Amazon MQ can return per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + } + }, + "required": [ + "BrokerId" + ] + }, + "ListUsersResponse": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "MaxResults": { + "shape": "__integerMin5Max100", + "locationName": "maxResults", + "documentation": "

Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

" + }, + "Users": { + "shape": "__listOfUserSummary", + "locationName": "users", + "documentation": "

Required. The list of all ActiveMQ usernames for the specified broker. Does not apply to RabbitMQ brokers.

" } } }, - "Logs" : { - "type" : "structure", - "members" : { - "Audit" : { - "shape" : "__boolean", - "locationName" : "audit", - "documentation" : "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged. Does not apply to RabbitMQ brokers.

" - }, - "General" : { - "shape" : "__boolean", - "locationName" : "general", - "documentation" : "

Enables general logging.

" - } - }, - "documentation" : "

The list of information about logs to be enabled for the specified broker.

" - }, - "LogsSummary" : { - "type" : "structure", - "members" : { - "Audit" : { - "shape" : "__boolean", - "locationName" : "audit", - "documentation" : "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.

" - }, - "AuditLogGroup" : { - "shape" : "__string", - "locationName" : "auditLogGroup", - "documentation" : "

The location of the CloudWatch Logs log group where audit logs are sent.

" - }, - "General" : { - "shape" : "__boolean", - "locationName" : "general", - "documentation" : "

Enables general logging.

" - }, - "GeneralLogGroup" : { - "shape" : "__string", - "locationName" : "generalLogGroup", - "documentation" : "

The location of the CloudWatch Logs log group where general logs are sent.

" - }, - "Pending" : { - "shape" : "PendingLogs", - "locationName" : "pending", - "documentation" : "

The list of information about logs pending to be deployed for the specified broker.

" - } - }, - "documentation" : "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

", - "required" : [ "GeneralLogGroup", "General" ] - }, - "MaxResults" : { - "type" : "integer", - "min" : 1, - "max" : 100 - }, - "NotFoundException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 404 + "Logs": { + "type": "structure", + "members": { + "Audit": { + "shape": "__boolean", + "locationName": "audit", + "documentation": "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged. Does not apply to RabbitMQ brokers.

" + }, + "General": { + "shape": "__boolean", + "locationName": "general", + "documentation": "

Enables general logging.

" + } + }, + "documentation": "

The list of information about logs to be enabled for the specified broker.

" + }, + "LogsSummary": { + "type": "structure", + "members": { + "Audit": { + "shape": "__boolean", + "locationName": "audit", + "documentation": "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.

" + }, + "AuditLogGroup": { + "shape": "__string", + "locationName": "auditLogGroup", + "documentation": "

The location of the CloudWatch Logs log group where audit logs are sent.

" + }, + "General": { + "shape": "__boolean", + "locationName": "general", + "documentation": "

Enables general logging.

" + }, + "GeneralLogGroup": { + "shape": "__string", + "locationName": "generalLogGroup", + "documentation": "

The location of the CloudWatch Logs log group where general logs are sent.

" + }, + "Pending": { + "shape": "PendingLogs", + "locationName": "pending", + "documentation": "

The list of information about logs pending to be deployed for the specified broker.

" + } + }, + "documentation": "

The list of information about logs currently enabled and pending to be deployed for the specified broker.

", + "required": [ + "GeneralLogGroup", + "General" + ] + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 100 + }, + "NotFoundException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 404 } }, - "PendingLogs" : { - "type" : "structure", - "members" : { - "Audit" : { - "shape" : "__boolean", - "locationName" : "audit", - "documentation" : "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.

" - }, - "General" : { - "shape" : "__boolean", - "locationName" : "general", - "documentation" : "

Enables general logging.

" + "PendingLogs": { + "type": "structure", + "members": { + "Audit": { + "shape": "__boolean", + "locationName": "audit", + "documentation": "

Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.

" + }, + "General": { + "shape": "__boolean", + "locationName": "general", + "documentation": "

Enables general logging.

" + } + }, + "documentation": "

The list of information about logs to be enabled for the specified broker.

" + }, + "PromoteInput": { + "type": "structure", + "members": { + "Mode": { + "shape": "PromoteMode", + "locationName": "mode", + "documentation": "

The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

" + } + }, + "documentation": "

Creates a Promote request with the properties specified.

", + "required": [ + "Mode" + ] + }, + "PromoteMode": { + "type": "string", + "documentation": "

The Promote mode requested.

", + "enum": [ + "SWITCHOVER", + "FAILOVER" + ] + }, + "PromoteOutput": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + } + }, + "documentation": "

Returns information about the updated broker.

" + }, + "PromoteRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "Mode": { + "shape": "PromoteMode", + "locationName": "mode", + "documentation": "

The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

" + } + }, + "documentation": "

Promotes a data replication replica broker to the primary broker role.

", + "required": [ + "BrokerId", + "Mode" + ] + }, + "PromoteResponse": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" } - }, - "documentation" : "

The list of information about logs to be enabled for the specified broker.

" - }, - "PromoteInput" : { - "type" : "structure", - "members" : { - "Mode" : { - "shape" : "PromoteMode", - "locationName" : "mode", - "documentation" : "

The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

" - } - }, - "documentation" : "

Creates a Promote request with the properties specified.

", - "required" : [ "Mode" ] - }, - "PromoteMode" : { - "type" : "string", - "documentation" : "

The Promote mode requested.

", - "enum" : [ "SWITCHOVER", "FAILOVER" ] + } }, - "PromoteOutput" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" + "RebootBrokerRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" } }, - "documentation" : "

Returns information about the updated broker.

" + "required": [ + "BrokerId" + ] }, - "PromoteRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "Mode" : { - "shape" : "PromoteMode", - "locationName" : "mode", - "documentation" : "

The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

" - } - }, - "documentation" : "

Promotes a data replication replica broker to the primary broker role.

", - "required" : [ "BrokerId", "Mode" ] - }, - "PromoteResponse" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } + "RebootBrokerResponse": { + "type": "structure", + "members": { } }, - "RebootBrokerRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - } - }, - "required" : [ "BrokerId" ] - }, - "RebootBrokerResponse" : { - "type" : "structure", - "members" : { } - }, - "SanitizationWarning" : { - "type" : "structure", - "members" : { - "AttributeName" : { - "shape" : "__string", - "locationName" : "attributeName", - "documentation" : "

The name of the configuration attribute that has been sanitized.

" - }, - "ElementName" : { - "shape" : "__string", - "locationName" : "elementName", - "documentation" : "

The name of the configuration element that has been sanitized.

" - }, - "Reason" : { - "shape" : "SanitizationWarningReason", - "locationName" : "reason", - "documentation" : "

The reason for which the configuration elements or attributes were sanitized.

" - } - }, - "documentation" : "

Returns information about the configuration element or attribute that was sanitized in the configuration.

", - "required" : [ "Reason" ] - }, - "SanitizationWarningReason" : { - "type" : "string", - "documentation" : "

The reason for which the configuration elements or attributes were sanitized.

", - "enum" : [ "DISALLOWED_ELEMENT_REMOVED", "DISALLOWED_ATTRIBUTE_REMOVED", "INVALID_ATTRIBUTE_VALUE_REMOVED" ] - }, - "Tags" : { - "type" : "structure", - "members" : { - "Tags" : { - "shape" : "__mapOf__string", - "locationName" : "tags", - "documentation" : "

The key-value pair for the resource tag.

" - } - }, - "documentation" : "

A map of the key-value pairs for the resource tag.

" - }, - "UnauthorizedException" : { - "type" : "structure", - "members" : { - "ErrorAttribute" : { - "shape" : "__string", - "locationName" : "errorAttribute", - "documentation" : "

The attribute which caused the error.

" - }, - "Message" : { - "shape" : "__string", - "locationName" : "message", - "documentation" : "

The explanation of the error.

" - } - }, - "documentation" : "

Returns information about an error.

", - "exception" : true, - "error" : { - "httpStatusCode" : 401 + "SanitizationWarning": { + "type": "structure", + "members": { + "AttributeName": { + "shape": "__string", + "locationName": "attributeName", + "documentation": "

The name of the configuration attribute that has been sanitized.

" + }, + "ElementName": { + "shape": "__string", + "locationName": "elementName", + "documentation": "

The name of the configuration element that has been sanitized.

" + }, + "Reason": { + "shape": "SanitizationWarningReason", + "locationName": "reason", + "documentation": "

The reason for which the configuration elements or attributes were sanitized.

" + } + }, + "documentation": "

Returns information about the configuration element or attribute that was sanitized in the configuration.

", + "required": [ + "Reason" + ] + }, + "SanitizationWarningReason": { + "type": "string", + "documentation": "

The reason for which the configuration elements or attributes were sanitized.

", + "enum": [ + "DISALLOWED_ELEMENT_REMOVED", + "DISALLOWED_ATTRIBUTE_REMOVED", + "INVALID_ATTRIBUTE_VALUE_REMOVED" + ] + }, + "Tags": { + "type": "structure", + "members": { + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

The key-value pair for the resource tag.

" + } + }, + "documentation": "

A map of the key-value pairs for the resource tag.

" + }, + "UnauthorizedException": { + "type": "structure", + "members": { + "ErrorAttribute": { + "shape": "__string", + "locationName": "errorAttribute", + "documentation": "

The attribute which caused the error.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error.

" + } + }, + "documentation": "

Returns information about an error.

", + "exception": true, + "error": { + "httpStatusCode": 401 } }, - "UpdateBrokerInput" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

A list of information about the configuration.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Defines whether this broker is a part of a data replication pair.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataInput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

Enables Amazon CloudWatch logging for brokers.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" - } - }, - "documentation" : "

Updates the broker using the specified properties.

" - }, - "UpdateBrokerOutput" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

The new boolean value that specifies whether broker engines automatically upgrade to new minor versions as new versions are released and supported by Amazon MQ.

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

The ID of the updated configuration.

" - }, - "DataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "dataReplicationMetadata", - "documentation" : "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Describes whether this broker is a part of a data replication pair.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine version to upgrade to. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

The list of information about logs to be enabled for the specified broker.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "PendingDataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "pendingDataReplicationMetadata", - "documentation" : "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" - }, - "PendingDataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "pendingDataReplicationMode", - "documentation" : "

Describes whether this broker will be a part of a data replication pair after reboot.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" - } - }, - "documentation" : "

Returns information about the updated broker.

", - "required" : [ "BrokerId" ] - }, - "UpdateBrokerRequest" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.

" - }, - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

A list of information about the configuration.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine version. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataInput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

Enables Amazon CloudWatch logging for brokers.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Defines whether this broker is a part of a data replication pair.

" - } - }, - "documentation" : "

Updates the broker using the specified properties.

", - "required" : [ "BrokerId" ] - }, - "UpdateBrokerResponse" : { - "type" : "structure", - "members" : { - "AuthenticationStrategy" : { - "shape" : "AuthenticationStrategy", - "locationName" : "authenticationStrategy", - "documentation" : "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" - }, - "AutoMinorVersionUpgrade" : { - "shape" : "__boolean", - "locationName" : "autoMinorVersionUpgrade", - "documentation" : "

The new boolean value that specifies whether broker engines automatically upgrade to new minor versions as new versions are released and supported by Amazon MQ.

" - }, - "BrokerId" : { - "shape" : "__string", - "locationName" : "brokerId", - "documentation" : "

Required. The unique ID that Amazon MQ generates for the broker.

" - }, - "Configuration" : { - "shape" : "ConfigurationId", - "locationName" : "configuration", - "documentation" : "

The ID of the updated configuration.

" - }, - "EngineVersion" : { - "shape" : "__string", - "locationName" : "engineVersion", - "documentation" : "

The broker engine version to upgrade to. For a list of supported engine versions, see Supported engines.

" - }, - "HostInstanceType" : { - "shape" : "__string", - "locationName" : "hostInstanceType", - "documentation" : "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" - }, - "LdapServerMetadata" : { - "shape" : "LdapServerMetadataOutput", - "locationName" : "ldapServerMetadata", - "documentation" : "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" - }, - "Logs" : { - "shape" : "Logs", - "locationName" : "logs", - "documentation" : "

The list of information about logs to be enabled for the specified broker.

" - }, - "MaintenanceWindowStartTime" : { - "shape" : "WeeklyStartTime", - "locationName" : "maintenanceWindowStartTime", - "documentation" : "

The parameters that determine the WeeklyStartTime.

" - }, - "SecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "securityGroups", - "documentation" : "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" - }, - "DataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "dataReplicationMetadata", - "documentation" : "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" - }, - "DataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "dataReplicationMode", - "documentation" : "

Describes whether this broker is a part of a data replication pair.

" - }, - "PendingDataReplicationMetadata" : { - "shape" : "DataReplicationMetadataOutput", - "locationName" : "pendingDataReplicationMetadata", - "documentation" : "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" - }, - "PendingDataReplicationMode" : { - "shape" : "DataReplicationMode", - "locationName" : "pendingDataReplicationMode", - "documentation" : "

Describes whether this broker will be a part of a data replication pair after reboot.

" + "UpdateBrokerInput": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

Must be set to true for ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

A list of information about the configuration.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Defines whether this broker is a part of a data replication pair.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and above, you must have autoMinorVersionUpgrade set to true for the broker.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataInput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

Enables Amazon CloudWatch logging for brokers.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" + } + }, + "documentation": "

Updates the broker using the specified properties.

" + }, + "UpdateBrokerOutput": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

The ID of the updated configuration.

" + }, + "DataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "dataReplicationMetadata", + "documentation": "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Describes whether this broker is a part of a data replication pair.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version to upgrade to. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

The list of information about logs to be enabled for the specified broker.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "PendingDataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "pendingDataReplicationMetadata", + "documentation": "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" + }, + "PendingDataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "pendingDataReplicationMode", + "documentation": "

Describes whether this broker will be a part of a data replication pair after reboot.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" + } + }, + "documentation": "

Returns information about the updated broker.

", + "required": [ + "BrokerId" + ] + }, + "UpdateBrokerRequest": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

Must be set to true for ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above.

" + }, + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

A list of information about the configuration.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and above, you must have autoMinorVersionUpgrade set to true for the broker.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataInput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

Enables Amazon CloudWatch logging for brokers.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Defines whether this broker is a part of a data replication pair.

" + } + }, + "documentation": "

Updates the broker using the specified properties.

", + "required": [ + "BrokerId" + ] + }, + "UpdateBrokerResponse": { + "type": "structure", + "members": { + "AuthenticationStrategy": { + "shape": "AuthenticationStrategy", + "locationName": "authenticationStrategy", + "documentation": "

Optional. The authentication strategy used to secure the broker. The default is SIMPLE.

" + }, + "AutoMinorVersionUpgrade": { + "shape": "__boolean", + "locationName": "autoMinorVersionUpgrade", + "documentation": "

Enables automatic upgrades to new patch versions for brokers as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window or after a manual broker reboot.

" + }, + "BrokerId": { + "shape": "__string", + "locationName": "brokerId", + "documentation": "

Required. The unique ID that Amazon MQ generates for the broker.

" + }, + "Configuration": { + "shape": "ConfigurationId", + "locationName": "configuration", + "documentation": "

The ID of the updated configuration.

" + }, + "EngineVersion": { + "shape": "__string", + "locationName": "engineVersion", + "documentation": "

The broker engine version to upgrade to. For more information, see the ActiveMQ version management and the RabbitMQ version management sections in the Amazon MQ Developer Guide.

" + }, + "HostInstanceType": { + "shape": "__string", + "locationName": "hostInstanceType", + "documentation": "

The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types.

" + }, + "LdapServerMetadata": { + "shape": "LdapServerMetadataOutput", + "locationName": "ldapServerMetadata", + "documentation": "

Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers.

" + }, + "Logs": { + "shape": "Logs", + "locationName": "logs", + "documentation": "

The list of information about logs to be enabled for the specified broker.

" + }, + "MaintenanceWindowStartTime": { + "shape": "WeeklyStartTime", + "locationName": "maintenanceWindowStartTime", + "documentation": "

The parameters that determine the WeeklyStartTime.

" + }, + "SecurityGroups": { + "shape": "__listOf__string", + "locationName": "securityGroups", + "documentation": "

The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

" + }, + "DataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "dataReplicationMetadata", + "documentation": "

The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

" + }, + "DataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "dataReplicationMode", + "documentation": "

Describes whether this broker is a part of a data replication pair.

" + }, + "PendingDataReplicationMetadata": { + "shape": "DataReplicationMetadataOutput", + "locationName": "pendingDataReplicationMetadata", + "documentation": "

The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

" + }, + "PendingDataReplicationMode": { + "shape": "DataReplicationMode", + "locationName": "pendingDataReplicationMode", + "documentation": "

Describes whether this broker will be a part of a data replication pair after reboot.

" } } }, - "UpdateConfigurationInput" : { - "type" : "structure", - "members" : { - "Data" : { - "shape" : "__string", - "locationName" : "data", - "documentation" : "

Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

The description of the configuration.

" - } - }, - "documentation" : "

Updates the specified configuration.

", - "required" : [ "Data" ] - }, - "UpdateConfigurationOutput" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

The Amazon Resource Name (ARN) of the configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Warnings" : { - "shape" : "__listOfSanitizationWarning", - "locationName" : "warnings", - "documentation" : "

The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

" - } - }, - "documentation" : "

Returns information about the updated configuration.

", - "required" : [ "Id", "Arn", "Name", "Created" ] - }, - "UpdateConfigurationRequest" : { - "type" : "structure", - "members" : { - "ConfigurationId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "configuration-id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "Data" : { - "shape" : "__string", - "locationName" : "data", - "documentation" : "

Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

" - }, - "Description" : { - "shape" : "__string", - "locationName" : "description", - "documentation" : "

The description of the configuration.

" - } - }, - "documentation" : "

Updates the specified configuration.

", - "required" : [ "ConfigurationId", "Data" ] - }, - "UpdateConfigurationResponse" : { - "type" : "structure", - "members" : { - "Arn" : { - "shape" : "__string", - "locationName" : "arn", - "documentation" : "

The Amazon Resource Name (ARN) of the configuration.

" - }, - "Created" : { - "shape" : "__timestampIso8601", - "locationName" : "created", - "documentation" : "

Required. The date and time of the configuration.

" - }, - "Id" : { - "shape" : "__string", - "locationName" : "id", - "documentation" : "

The unique ID that Amazon MQ generates for the configuration.

" - }, - "LatestRevision" : { - "shape" : "ConfigurationRevision", - "locationName" : "latestRevision", - "documentation" : "

The latest revision of the configuration.

" - }, - "Name" : { - "shape" : "__string", - "locationName" : "name", - "documentation" : "

The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" - }, - "Warnings" : { - "shape" : "__listOfSanitizationWarning", - "locationName" : "warnings", - "documentation" : "

The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

" + "UpdateConfigurationInput": { + "type": "structure", + "members": { + "Data": { + "shape": "__string", + "locationName": "data", + "documentation": "

Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the configuration.

" + } + }, + "documentation": "

Updates the specified configuration.

", + "required": [ + "Data" + ] + }, + "UpdateConfigurationOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Warnings": { + "shape": "__listOfSanitizationWarning", + "locationName": "warnings", + "documentation": "

The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

" + } + }, + "documentation": "

Returns information about the updated configuration.

", + "required": [ + "Id", + "Arn", + "Name", + "Created" + ] + }, + "UpdateConfigurationRequest": { + "type": "structure", + "members": { + "ConfigurationId": { + "shape": "__string", + "location": "uri", + "locationName": "configuration-id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "Data": { + "shape": "__string", + "locationName": "data", + "documentation": "

Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the configuration.

" + } + }, + "documentation": "

Updates the specified configuration.

", + "required": [ + "ConfigurationId", + "Data" + ] + }, + "UpdateConfigurationResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the configuration.

" + }, + "Created": { + "shape": "__timestampIso8601", + "locationName": "created", + "documentation": "

Required. The date and time of the configuration.

" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique ID that Amazon MQ generates for the configuration.

" + }, + "LatestRevision": { + "shape": "ConfigurationRevision", + "locationName": "latestRevision", + "documentation": "

The latest revision of the configuration.

" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

" + }, + "Warnings": { + "shape": "__listOfSanitizationWarning", + "locationName": "warnings", + "documentation": "

The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

" } } }, - "UpdateUserInput" : { - "type" : "structure", - "members" : { - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Password" : { - "shape" : "__string", - "locationName" : "password", - "documentation" : "

The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Defines whether the user is intended for data replication.

" - } - }, - "documentation" : "

Updates the information for an ActiveMQ user.

" - }, - "UpdateUserRequest" : { - "type" : "structure", - "members" : { - "BrokerId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "broker-id", - "documentation" : "

The unique ID that Amazon MQ generates for the broker.

" - }, - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "Password" : { - "shape" : "__string", - "locationName" : "password", - "documentation" : "

The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" - }, - "Username" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "username", - "documentation" : "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Defines whether the user is intended for data replication.

" - } - }, - "documentation" : "

Updates the information for an ActiveMQ user.

", - "required" : [ "Username", "BrokerId" ] - }, - "UpdateUserResponse" : { - "type" : "structure", - "members" : { } - }, - "User" : { - "type" : "structure", - "members" : { - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the ActiveMQ Web Console for the ActiveMQ user. Does not apply to RabbitMQ brokers.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Does not apply to RabbitMQ brokers.

" - }, - "Password" : { - "shape" : "__string", - "locationName" : "password", - "documentation" : "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" - }, - "Username" : { - "shape" : "__string", - "locationName" : "username", - "documentation" : "

The username of the broker user. The following restrictions apply to broker usernames:

  • For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

  • para>For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. This value must be 2-100 characters long.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

" - }, - "ReplicationUser" : { - "shape" : "__boolean", - "locationName" : "replicationUser", - "documentation" : "

Defines if this user is intended for CRDR replication purposes.

" - } - }, - "documentation" : "

A user associated with the broker. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

", - "required" : [ "Username", "Password" ] - }, - "UserPendingChanges" : { - "type" : "structure", - "members" : { - "ConsoleAccess" : { - "shape" : "__boolean", - "locationName" : "consoleAccess", - "documentation" : "

Enables access to the the ActiveMQ Web Console for the ActiveMQ user.

" - }, - "Groups" : { - "shape" : "__listOf__string", - "locationName" : "groups", - "documentation" : "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - }, - "PendingChange" : { - "shape" : "ChangeType", - "locationName" : "pendingChange", - "documentation" : "

Required. The type of change pending for the ActiveMQ user.

" - } - }, - "documentation" : "

Returns information about the status of the changes pending for the ActiveMQ user.

", - "required" : [ "PendingChange" ] - }, - "UserSummary" : { - "type" : "structure", - "members" : { - "PendingChange" : { - "shape" : "ChangeType", - "locationName" : "pendingChange", - "documentation" : "

The type of change pending for the broker user.

" - }, - "Username" : { - "shape" : "__string", - "locationName" : "username", - "documentation" : "

Required. The username of the broker user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" - } - }, - "documentation" : "

Returns a list of all broker users. Does not apply to RabbitMQ brokers.

", - "required" : [ "Username" ] - }, - "WeeklyStartTime" : { - "type" : "structure", - "members" : { - "DayOfWeek" : { - "shape" : "DayOfWeek", - "locationName" : "dayOfWeek", - "documentation" : "

Required. The day of the week.

" - }, - "TimeOfDay" : { - "shape" : "__string", - "locationName" : "timeOfDay", - "documentation" : "

Required. The time, in 24-hour format.

" - }, - "TimeZone" : { - "shape" : "__string", - "locationName" : "timeZone", - "documentation" : "

The time zone, UTC by default, in either the Country/City format, or the UTC offset format.

" - } - }, - "documentation" : "

The scheduled time period relative to UTC during which Amazon MQ begins to apply pending updates or patches to the broker.

", - "required" : [ "TimeOfDay", "DayOfWeek" ] - }, - "__boolean" : { - "type" : "boolean" - }, - "__double" : { - "type" : "double" - }, - "__integer" : { - "type" : "integer" - }, - "__integerMin5Max100" : { - "type" : "integer", - "min" : 5, - "max" : 100 - }, - "__listOfActionRequired" : { - "type" : "list", - "member" : { - "shape" : "ActionRequired" + "UpdateUserInput": { + "type": "structure", + "members": { + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Password": { + "shape": "__string", + "locationName": "password", + "documentation": "

The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Defines whether the user is intended for data replication.

" + } + }, + "documentation": "

Updates the information for an ActiveMQ user.

" + }, + "UpdateUserRequest": { + "type": "structure", + "members": { + "BrokerId": { + "shape": "__string", + "location": "uri", + "locationName": "broker-id", + "documentation": "

The unique ID that Amazon MQ generates for the broker.

" + }, + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "Password": { + "shape": "__string", + "locationName": "password", + "documentation": "

The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" + }, + "Username": { + "shape": "__string", + "location": "uri", + "locationName": "username", + "documentation": "

The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Defines whether the user is intended for data replication.

" + } + }, + "documentation": "

Updates the information for an ActiveMQ user.

", + "required": [ + "Username", + "BrokerId" + ] + }, + "UpdateUserResponse": { + "type": "structure", + "members": { + } + }, + "User": { + "type": "structure", + "members": { + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user. Does not apply to RabbitMQ brokers.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Does not apply to RabbitMQ brokers.

" + }, + "Password": { + "shape": "__string", + "locationName": "password", + "documentation": "

Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

" + }, + "Username": { + "shape": "__string", + "locationName": "username", + "documentation": "

The username of the broker user. The following restrictions apply to broker usernames:

  • For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

  • For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibits using guest as a valid username. This value must be 2-100 characters long.

Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

" + }, + "ReplicationUser": { + "shape": "__boolean", + "locationName": "replicationUser", + "documentation": "

Defines if this user is intended for CRDR replication purposes.

" + } + }, + "documentation": "

A user associated with the broker. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

", + "required": [ + "Username", + "Password" + ] + }, + "UserPendingChanges": { + "type": "structure", + "members": { + "ConsoleAccess": { + "shape": "__boolean", + "locationName": "consoleAccess", + "documentation": "

Enables access to the ActiveMQ Web Console for the ActiveMQ user.

" + }, + "Groups": { + "shape": "__listOf__string", + "locationName": "groups", + "documentation": "

The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + }, + "PendingChange": { + "shape": "ChangeType", + "locationName": "pendingChange", + "documentation": "

Required. The type of change pending for the ActiveMQ user.

" + } + }, + "documentation": "

Returns information about the status of the changes pending for the ActiveMQ user.

", + "required": [ + "PendingChange" + ] + }, + "UserSummary": { + "type": "structure", + "members": { + "PendingChange": { + "shape": "ChangeType", + "locationName": "pendingChange", + "documentation": "

The type of change pending for the broker user.

" + }, + "Username": { + "shape": "__string", + "locationName": "username", + "documentation": "

Required. The username of the broker user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

" + } + }, + "documentation": "

Returns a list of all broker users. Does not apply to RabbitMQ brokers.

", + "required": [ + "Username" + ] + }, + "WeeklyStartTime": { + "type": "structure", + "members": { + "DayOfWeek": { + "shape": "DayOfWeek", + "locationName": "dayOfWeek", + "documentation": "

Required. The day of the week.

" + }, + "TimeOfDay": { + "shape": "__string", + "locationName": "timeOfDay", + "documentation": "

Required. The time, in 24-hour format.

" + }, + "TimeZone": { + "shape": "__string", + "locationName": "timeZone", + "documentation": "

The time zone, UTC by default, in either the Country/City format, or the UTC offset format.

" + } + }, + "documentation": "

The scheduled time period relative to UTC during which Amazon MQ begins to apply pending updates or patches to the broker.

", + "required": [ + "TimeOfDay", + "DayOfWeek" + ] + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__integerMin5Max100": { + "type": "integer", + "min": 5, + "max": 100 + }, + "__listOfActionRequired": { + "type": "list", + "member": { + "shape": "ActionRequired" } }, - "__listOfAvailabilityZone" : { - "type" : "list", - "member" : { - "shape" : "AvailabilityZone" + "__listOfAvailabilityZone": { + "type": "list", + "member": { + "shape": "AvailabilityZone" } }, - "__listOfBrokerEngineType" : { - "type" : "list", - "member" : { - "shape" : "BrokerEngineType" + "__listOfBrokerEngineType": { + "type": "list", + "member": { + "shape": "BrokerEngineType" } }, - "__listOfBrokerInstance" : { - "type" : "list", - "member" : { - "shape" : "BrokerInstance" + "__listOfBrokerInstance": { + "type": "list", + "member": { + "shape": "BrokerInstance" } }, - "__listOfBrokerInstanceOption" : { - "type" : "list", - "member" : { - "shape" : "BrokerInstanceOption" + "__listOfBrokerInstanceOption": { + "type": "list", + "member": { + "shape": "BrokerInstanceOption" } }, - "__listOfBrokerSummary" : { - "type" : "list", - "member" : { - "shape" : "BrokerSummary" + "__listOfBrokerSummary": { + "type": "list", + "member": { + "shape": "BrokerSummary" } }, - "__listOfConfiguration" : { - "type" : "list", - "member" : { - "shape" : "Configuration" + "__listOfConfiguration": { + "type": "list", + "member": { + "shape": "Configuration" } }, - "__listOfConfigurationId" : { - "type" : "list", - "member" : { - "shape" : "ConfigurationId" + "__listOfConfigurationId": { + "type": "list", + "member": { + "shape": "ConfigurationId" } }, - "__listOfConfigurationRevision" : { - "type" : "list", - "member" : { - "shape" : "ConfigurationRevision" + "__listOfConfigurationRevision": { + "type": "list", + "member": { + "shape": "ConfigurationRevision" } }, - "__listOfDeploymentMode" : { - "type" : "list", - "member" : { - 
"shape" : "DeploymentMode" + "__listOfDeploymentMode": { + "type": "list", + "member": { + "shape": "DeploymentMode" } }, - "__listOfEngineVersion" : { - "type" : "list", - "member" : { - "shape" : "EngineVersion" + "__listOfEngineVersion": { + "type": "list", + "member": { + "shape": "EngineVersion" } }, - "__listOfSanitizationWarning" : { - "type" : "list", - "member" : { - "shape" : "SanitizationWarning" + "__listOfSanitizationWarning": { + "type": "list", + "member": { + "shape": "SanitizationWarning" } }, - "__listOfUser" : { - "type" : "list", - "member" : { - "shape" : "User" + "__listOfUser": { + "type": "list", + "member": { + "shape": "User" } }, - "__listOfUserSummary" : { - "type" : "list", - "member" : { - "shape" : "UserSummary" + "__listOfUserSummary": { + "type": "list", + "member": { + "shape": "UserSummary" } }, - "__listOf__string" : { - "type" : "list", - "member" : { - "shape" : "__string" + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" } }, - "__long" : { - "type" : "long" + "__long": { + "type": "long" }, - "__mapOf__string" : { - "type" : "map", - "key" : { - "shape" : "__string" + "__mapOf__string": { + "type": "map", + "key": { + "shape": "__string" }, - "value" : { - "shape" : "__string" + "value": { + "shape": "__string" } }, - "__string" : { - "type" : "string" + "__string": { + "type": "string" }, - "__timestampIso8601" : { - "type" : "timestamp", - "timestampFormat" : "iso8601" + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" }, - "__timestampUnix" : { - "type" : "timestamp", - "timestampFormat" : "unixTimestamp" + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" } }, - "documentation" : "

Amazon MQ is a managed message broker service for Apache ActiveMQ and RabbitMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols.

" + "documentation": "

Amazon MQ is a managed message broker service for Apache ActiveMQ and RabbitMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols.

" } \ No newline at end of file diff --git a/botocore/data/mwaa/2020-07-01/service-2.json b/botocore/data/mwaa/2020-07-01/service-2.json index 733ebbf306..939c10979d 100644 --- a/botocore/data/mwaa/2020-07-01/service-2.json +++ b/botocore/data/mwaa/2020-07-01/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2020-07-01", "endpointPrefix":"airflow", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AmazonMWAA", "serviceId":"MWAA", "signatureVersion":"v4", @@ -230,31 +230,31 @@ "type":"string", "max":32, "min":1, - "pattern":"^[0-9a-z.]+$" + "pattern":"[0-9a-z.]+" }, "CeleryExecutorQueue":{ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:sqs:[a-z0-9\\-]+:\\d{12}:[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws(-[a-z]+)?:sqs:[a-z0-9\\-]+:\\d{12}:[a-zA-Z_0-9+=,.@\\-_/]+" }, "CloudWatchLogGroupArn":{ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:logs:[a-z0-9\\-]+:\\d{12}:log-group:\\w+" + "pattern":"arn:aws(-[a-z]+)?:logs:[a-z0-9\\-]+:\\d{12}:log-group:\\w+.*" }, "ConfigKey":{ "type":"string", "max":64, "min":1, - "pattern":"^[a-z]+([a-z0-9._]*[a-z0-9_]+)?$" + "pattern":"[a-z]+([a-z0-9._]*[a-z0-9_]+)?" }, "ConfigValue":{ "type":"string", "max":65536, "min":1, - "pattern":"^[ -~]+$", + "pattern":"[ -~]+", "sensitive":true }, "CreateCliTokenRequest":{ @@ -285,94 +285,86 @@ "CreateEnvironmentInput":{ "type":"structure", "required":[ - "DagS3Path", - "ExecutionRoleArn", "Name", - "NetworkConfiguration", - "SourceBucketArn" + "ExecutionRoleArn", + "SourceBucketArn", + "DagS3Path", + "NetworkConfiguration" ], "members":{ - "AirflowConfigurationOptions":{ - "shape":"AirflowConfigurationOptions", - "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

" - }, - "AirflowVersion":{ - "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1

" - }, - "DagS3Path":{ - "shape":"RelativePath", - "documentation":"

The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

" - }, - "EndpointManagement":{ - "shape":"EndpointManagement", - "documentation":"

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one.

" - }, - "EnvironmentClass":{ - "shape":"EnvironmentClass", - "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

", + "location":"uri", + "locationName":"Name" }, "ExecutionRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an Amazon Web Services Identity and Access Management (IAM) role that grants MWAA permission to access Amazon Web Services services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role.

" }, - "KmsKey":{ - "shape":"KmsKey", - "documentation":"

The Amazon Web Services Key Management Service (KMS) key to encrypt the data in your environment. You can use an Amazon Web Services owned CMK, or a Customer managed CMK (advanced). For more information, see Create an Amazon MWAA environment.

" - }, - "LoggingConfiguration":{ - "shape":"LoggingConfigurationInput", - "documentation":"

Defines the Apache Airflow logs to send to CloudWatch Logs.

" - }, - "MaxWorkers":{ - "shape":"MaxWorkers", - "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

" - }, - "MinWorkers":{ - "shape":"MinWorkers", - "documentation":"

The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

" + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" }, - "Name":{ - "shape":"EnvironmentName", - "documentation":"

The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

", - "location":"uri", - "locationName":"Name" + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

" }, "NetworkConfiguration":{ "shape":"NetworkConfiguration", "documentation":"

The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" }, - "PluginsS3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

The version of the plugins.zip file on your Amazon S3 bucket. You must specify a version each time a plugins.zip file is updated. For more information, see How S3 Versioning works.

" - }, "PluginsS3Path":{ "shape":"RelativePath", "documentation":"

The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then the plugins.zip version is required. For more information, see Installing custom plugins.

" }, - "RequirementsS3ObjectVersion":{ + "PluginsS3ObjectVersion":{ "shape":"S3ObjectVersion", - "documentation":"

The version of the requirements.txt file on your Amazon S3 bucket. You must specify a version each time a requirements.txt file is updated. For more information, see How S3 Versioning works.

" + "documentation":"

The version of the plugins.zip file on your Amazon S3 bucket. You must specify a version each time a plugins.zip file is updated. For more information, see How S3 Versioning works.

" }, "RequirementsS3Path":{ "shape":"RelativePath", "documentation":"

The relative path to the requirements.txt file on your Amazon S3 bucket. For example, requirements.txt. If specified, then a version is required. For more information, see Installing Python dependencies.

" }, - "Schedulers":{ - "shape":"Schedulers", - "documentation":"

The number of Apache Airflow schedulers to run in your environment. Valid values:

  • v2 - Accepts between 2 to 5. Defaults to 2.

  • v1 - Accepts 1.

" + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The version of the requirements.txt file on your Amazon S3 bucket. You must specify a version each time a requirements.txt file is updated. For more information, see How S3 Versioning works.

" }, - "SourceBucketArn":{ - "shape":"S3BucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" + "StartupScriptS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" }, "StartupScriptS3ObjectVersion":{ "shape":"S3ObjectVersion", "documentation":"

The version of the startup shell script in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file every time you update the script.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Using a startup script.

" }, - "StartupScriptS3Path":{ - "shape":"RelativePath", - "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

" + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

" + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

The Amazon Web Services Key Management Service (KMS) key to encrypt the data in your environment. You can use an Amazon Web Services owned CMK, or a Customer managed CMK (advanced). For more information, see Create an Amazon MWAA environment.

" + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

Defines the Apache Airflow logs to send to CloudWatch Logs.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

" }, "Tags":{ "shape":"TagMap", @@ -382,9 +374,25 @@ "shape":"WebserverAccessMode", "documentation":"

Defines the access mode for the Apache Airflow web server. For more information, see Apache Airflow access modes.

" }, - "WeeklyMaintenanceWindowStart":{ - "shape":"WeeklyMaintenanceWindowStart", - "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

" + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

" + }, + "Schedulers":{ + "shape":"Schedulers", + "documentation":"

The number of Apache Airflow schedulers to run in your environment. Valid values:

  • v2 - Accepts between 2 to 5. Defaults to 2.

  • v1 - Accepts 1.

" + }, + "EndpointManagement":{ + "shape":"EndpointManagement", + "documentation":"

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one.

" + }, + "MinWebservers":{ + "shape":"MinWebservers", + "documentation":"

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" + }, + "MaxWebservers":{ + "shape":"MaxWebservers", + "documentation":"

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" } }, "documentation":"

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation to create an environment. For more information, see Get started with Amazon Managed Workflows for Apache Airflow.

" @@ -413,21 +421,21 @@ "CreateWebLoginTokenResponse":{ "type":"structure", "members":{ - "AirflowIdentity":{ - "shape":"AirflowIdentity", - "documentation":"

The user name of the Apache Airflow identity creating the web login token.

" - }, - "IamIdentity":{ - "shape":"IamIdentity", - "documentation":"

The name of the IAM identity creating the web login token. This might be an IAM user, or an assumed or federated identity. For example, assumed-role/Admin/your-name.

" + "WebToken":{ + "shape":"Token", + "documentation":"

An Airflow web server login token.

" }, "WebServerHostname":{ "shape":"Hostname", "documentation":"

The Airflow web server hostname for the environment.

" }, - "WebToken":{ - "shape":"Token", - "documentation":"

An Airflow web server login token.

" + "IamIdentity":{ + "shape":"IamIdentity", + "documentation":"

The name of the IAM identity creating the web login token. This might be an IAM user, or an assumed or federated identity. For example, assumed-role/Admin/your-name.

" + }, + "AirflowIdentity":{ + "shape":"AirflowIdentity", + "documentation":"

The user name of the Apache Airflow identity creating the web login token.

" } } }, @@ -489,113 +497,101 @@ "Environment":{ "type":"structure", "members":{ - "AirflowConfigurationOptions":{ - "shape":"AirflowConfigurationOptions", - "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options attached to your environment. For more information, see Apache Airflow configuration options.

" + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

" }, - "AirflowVersion":{ - "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version on your environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2.

" + "Status":{ + "shape":"EnvironmentStatus", + "documentation":"

The status of the Amazon MWAA environment.

Valid values:

  • CREATING - Indicates the request to create the environment is in progress.

  • CREATING_SNAPSHOT - Indicates the request to update environment details, or upgrade the environment version, is in progress and Amazon MWAA is creating a storage volume snapshot of the Amazon RDS database cluster associated with the environment. A database snapshot is a backup created at a specific point in time. Amazon MWAA uses snapshots to recover environment metadata if the process to update or upgrade an environment fails.

  • CREATE_FAILED - Indicates the request to create the environment failed, and the environment could not be created.

  • AVAILABLE - Indicates the request was successful and the environment is ready to use.

  • PENDING - Indicates the request was successful, but the process to create the environment is paused until you create the required VPC endpoints in your VPC. After you create the VPC endpoints, the process resumes.

  • UPDATING - Indicates the request to update the environment is in progress.

  • ROLLING_BACK - Indicates the request to update environment details, or upgrade the environment version, failed and Amazon MWAA is restoring the environment using the latest storage volume snapshot.

  • DELETING - Indicates the request to delete the environment is in progress.

  • DELETED - Indicates the request to delete the environment is complete, and the environment has been deleted.

  • UNAVAILABLE - Indicates the request failed, but the environment did not return to its previous state and is not stable.

  • UPDATE_FAILED - Indicates the request to update the environment failed, and the environment was restored to its previous state successfully and is ready to use.

  • MAINTENANCE - Indicates that the environment is undergoing maintenance. Depending on the type of work Amazon MWAA is performing, your environment might become unavailable during this process. After all operations are done, your environment will return to its status prior to maintenance operations.

We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

" }, "Arn":{ "shape":"EnvironmentArn", "documentation":"

The Amazon Resource Name (ARN) of the Amazon MWAA environment.

" }, - "CeleryExecutorQueue":{ - "shape":"CeleryExecutorQueue", - "documentation":"

The queue ARN for the environment's Celery Executor. Amazon MWAA uses a Celery Executor to distribute tasks across multiple workers. When you create an environment in a shared VPC, you must provide access to the Celery Executor queue from your VPC.

" - }, "CreatedAt":{ "shape":"CreatedAt", "documentation":"

The day and time the environment was created.

" }, - "DagS3Path":{ - "shape":"RelativePath", - "documentation":"

The relative path to the DAGs folder in your Amazon S3 bucket. For example, s3://mwaa-environment/dags. For more information, see Adding or updating DAGs.

" - }, - "DatabaseVpcEndpointService":{ - "shape":"VpcEndpointServiceName", - "documentation":"

The VPC endpoint for the environment's Amazon RDS database.

" - }, - "EndpointManagement":{ - "shape":"EndpointManagement", - "documentation":"

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints in your VPC.

" - }, - "EnvironmentClass":{ - "shape":"EnvironmentClass", - "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "WebserverUrl":{ + "shape":"WebserverUrl", + "documentation":"

The Apache Airflow web server host name for the Amazon MWAA environment. For more information, see Accessing the Apache Airflow UI.

" }, "ExecutionRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role.

" }, + "ServiceRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) for the service-linked role of the environment. For more information, see Amazon MWAA Service-linked role.

" + }, "KmsKey":{ "shape":"KmsKey", "documentation":"

The KMS encryption key used to encrypt the data in your environment.

" }, - "LastUpdate":{ - "shape":"LastUpdate", - "documentation":"

The status of the last update on the environment.

" - }, - "LoggingConfiguration":{ - "shape":"LoggingConfiguration", - "documentation":"

The Apache Airflow logs published to CloudWatch Logs.

" - }, - "MaxWorkers":{ - "shape":"MaxWorkers", - "documentation":"

The maximum number of workers that run in your environment. For example, 20.

" - }, - "MinWorkers":{ - "shape":"MinWorkers", - "documentation":"

The minimum number of workers that run in your environment. For example, 2.

" - }, - "Name":{ - "shape":"EnvironmentName", - "documentation":"

The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

" + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Apache Airflow version on your environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1.

" }, - "NetworkConfiguration":{ - "shape":"NetworkConfiguration", - "documentation":"

Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" }, - "PluginsS3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

The version of the plugins.zip file in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Installing custom plugins.

" + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the DAGs folder in your Amazon S3 bucket. For example, s3://mwaa-environment/dags. For more information, see Adding or updating DAGs.

" }, "PluginsS3Path":{ "shape":"RelativePath", "documentation":"

The relative path to the file in your Amazon S3 bucket. For example, s3://mwaa-environment/plugins.zip. For more information, see Installing custom plugins.

" }, - "RequirementsS3ObjectVersion":{ + "PluginsS3ObjectVersion":{ "shape":"S3ObjectVersion", - "documentation":"

The version of the requirements.txt file on your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Installing Python dependencies.

" + "documentation":"

The version of the plugins.zip file in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Installing custom plugins.

" }, "RequirementsS3Path":{ "shape":"RelativePath", "documentation":"

The relative path to the requirements.txt file in your Amazon S3 bucket. For example, s3://mwaa-environment/requirements.txt. For more information, see Installing Python dependencies.

" }, - "Schedulers":{ - "shape":"Schedulers", - "documentation":"

The number of Apache Airflow schedulers that run in your Amazon MWAA environment.

" - }, - "ServiceRoleArn":{ - "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the service-linked role of the environment. For more information, see Amazon MWAA Service-linked role.

" + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The version of the requirements.txt file on your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Installing Python dependencies.

" }, - "SourceBucketArn":{ - "shape":"S3BucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" + "StartupScriptS3Path":{ + "shape":"String", + "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" }, "StartupScriptS3ObjectVersion":{ "shape":"String", "documentation":"

The version of the startup shell script in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Using a startup script.

" }, - "StartupScriptS3Path":{ - "shape":"String", - "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options attached to your environment. For more information, see Apache Airflow configuration options.

" }, - "Status":{ - "shape":"EnvironmentStatus", - "documentation":"

The status of the Amazon MWAA environment.

Valid values:

  • CREATING - Indicates the request to create the environment is in progress.

  • CREATING_SNAPSHOT - Indicates the request to update environment details, or upgrade the environment version, is in progress and Amazon MWAA is creating a storage volume snapshot of the Amazon RDS database cluster associated with the environment. A database snapshot is a backup created at a specific point in time. Amazon MWAA uses snapshots to recover environment metadata if the process to update or upgrade an environment fails.

  • CREATE_FAILED - Indicates the request to create the environment failed, and the environment could not be created.

  • AVAILABLE - Indicates the request was successful and the environment is ready to use.

  • PENDING - Indicates the request was successful, but the process to create the environment is paused until you create the required VPC endpoints in your VPC. After you create the VPC endpoints, the process resumes.

  • UPDATING - Indicates the request to update the environment is in progress.

  • ROLLING_BACK - Indicates the request to update environment details, or upgrade the environment version, failed and Amazon MWAA is restoring the environment using the latest storage volume snapshot.

  • DELETING - Indicates the request to delete the environment is in progress.

  • DELETED - Indicates the request to delete the environment is complete, and the environment has been deleted.

  • UNAVAILABLE - Indicates the request failed, but the environment did not return to its previous state and is not stable.

  • UPDATE_FAILED - Indicates the request to update the environment failed, and the environment was restored to its previous state successfully and is ready to use.

  • MAINTENANCE - Indicates that the environment is undergoing maintenance. Depending on the type of work Amazon MWAA is performing, your environment might become unavailable during this process. After all operations are done, your environment will return to its status prior to maintenance operations.

We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

" + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The maximum number of workers that run in your environment. For example, 20.

" + }, + "NetworkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The Apache Airflow logs published to CloudWatch Logs.

" + }, + "LastUpdate":{ + "shape":"LastUpdate", + "documentation":"

The status of the last update on the environment.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

" }, "Tags":{ "shape":"TagMap", @@ -605,17 +601,37 @@ "shape":"WebserverAccessMode", "documentation":"

The Apache Airflow web server access mode. For more information, see Apache Airflow access modes.

" }, - "WebserverUrl":{ - "shape":"WebserverUrl", - "documentation":"

The Apache Airflow web server host name for the Amazon MWAA environment. For more information, see Accessing the Apache Airflow UI.

" + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The minimum number of workers that run in your environment. For example, 2.

" + }, + "Schedulers":{ + "shape":"Schedulers", + "documentation":"

The number of Apache Airflow schedulers that run in your Amazon MWAA environment.

" }, "WebserverVpcEndpointService":{ "shape":"VpcEndpointServiceName", "documentation":"

The VPC endpoint for the environment's web server.

" }, - "WeeklyMaintenanceWindowStart":{ - "shape":"WeeklyMaintenanceWindowStart", - "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

" + "DatabaseVpcEndpointService":{ + "shape":"VpcEndpointServiceName", + "documentation":"

The VPC endpoint for the environment's Amazon RDS database.

" + }, + "CeleryExecutorQueue":{ + "shape":"CeleryExecutorQueue", + "documentation":"

The queue ARN for the environment's Celery Executor. Amazon MWAA uses a Celery Executor to distribute tasks across multiple workers. When you create an environment in a shared VPC, you must provide access to the Celery Executor queue from your VPC.

" + }, + "EndpointManagement":{ + "shape":"EndpointManagement", + "documentation":"

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints in your VPC.

" + }, + "MinWebservers":{ + "shape":"MinWebservers", + "documentation":"

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" + }, + "MaxWebservers":{ + "shape":"MaxWebservers", + "documentation":"

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" } }, "documentation":"

Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

" @@ -624,7 +640,7 @@ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:airflow:[a-z0-9\\-]+:\\d{12}:environment/\\w+" + "pattern":"arn:aws(-[a-z]+)?:airflow:[a-z0-9\\-]+:\\d{12}:environment/\\w+.*" }, "EnvironmentClass":{ "type":"string", @@ -639,7 +655,7 @@ "type":"string", "max":80, "min":1, - "pattern":"^[a-zA-Z][0-9a-zA-Z-_]*$" + "pattern":"[a-zA-Z][0-9a-zA-Z-_]*" }, "EnvironmentStatus":{ "type":"string", @@ -663,7 +679,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^.+$" + "pattern":".+" }, "GetEnvironmentInput":{ "type":"structure", @@ -690,14 +706,14 @@ "type":"string", "max":255, "min":1, - "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$" + "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])" }, "IamIdentity":{"type":"string"}, "IamRoleArn":{ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "Integer":{ "type":"integer", @@ -717,11 +733,15 @@ "type":"string", "max":1224, "min":1, - "pattern":"^(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)$" + "pattern":"(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)" }, "LastUpdate":{ "type":"structure", "members":{ + "Status":{ + "shape":"UpdateStatus", + "documentation":"

The status of the last update on the environment.

" + }, "CreatedAt":{ "shape":"UpdateCreatedAt", "documentation":"

The day and time of the last update on the environment.

" @@ -733,10 +753,6 @@ "Source":{ "shape":"UpdateSource", "documentation":"

The source of the last update to the environment. Includes internal processes by Amazon MWAA, such as an environment maintenance update.

" - }, - "Status":{ - "shape":"UpdateStatus", - "documentation":"

The status of the last update on the environment.

" } }, "documentation":"

Describes the status of the last update on the environment, and any errors that were encountered.

" @@ -744,17 +760,17 @@ "ListEnvironmentsInput":{ "type":"structure", "members":{ - "MaxResults":{ - "shape":"ListEnvironmentsInputMaxResultsInteger", - "documentation":"

The maximum number of results to retrieve per page. For example, 5 environments per page.

", - "location":"querystring", - "locationName":"MaxResults" - }, "NextToken":{ "shape":"NextToken", "documentation":"

Retrieves the next page of the results.

", "location":"querystring", "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListEnvironmentsInputMaxResultsInteger", + "documentation":"

The maximum number of results to retrieve per page. For example, 5 environments per page.

", + "location":"querystring", + "locationName":"MaxResults" } } }, @@ -810,10 +826,6 @@ "shape":"ModuleLoggingConfiguration", "documentation":"

The Airflow scheduler logs published to CloudWatch Logs and the log level.

" }, - "TaskLogs":{ - "shape":"ModuleLoggingConfiguration", - "documentation":"

The Airflow task logs published to CloudWatch Logs and the log level.

" - }, "WebserverLogs":{ "shape":"ModuleLoggingConfiguration", "documentation":"

The Airflow web server logs published to CloudWatch Logs and the log level.

" @@ -821,6 +833,10 @@ "WorkerLogs":{ "shape":"ModuleLoggingConfiguration", "documentation":"

The Airflow worker logs published to CloudWatch Logs and the log level.

" + }, + "TaskLogs":{ + "shape":"ModuleLoggingConfiguration", + "documentation":"

The Airflow task logs published to CloudWatch Logs and the log level.

" } }, "documentation":"

Describes the Apache Airflow log types that are published to CloudWatch Logs.

" @@ -836,10 +852,6 @@ "shape":"ModuleLoggingConfigurationInput", "documentation":"

Publishes Airflow scheduler logs to CloudWatch Logs.

" }, - "TaskLogs":{ - "shape":"ModuleLoggingConfigurationInput", - "documentation":"

Publishes Airflow task logs to CloudWatch Logs.

" - }, "WebserverLogs":{ "shape":"ModuleLoggingConfigurationInput", "documentation":"

Publishes Airflow web server logs to CloudWatch Logs.

" @@ -847,6 +859,10 @@ "WorkerLogs":{ "shape":"ModuleLoggingConfigurationInput", "documentation":"

Publishes Airflow worker logs to CloudWatch Logs.

" + }, + "TaskLogs":{ + "shape":"ModuleLoggingConfigurationInput", + "documentation":"

Publishes Airflow task logs to CloudWatch Logs.

" } }, "documentation":"

Defines the Apache Airflow log types to send to CloudWatch Logs.

" @@ -865,6 +881,11 @@ "DEBUG" ] }, + "MaxWebservers":{ + "type":"integer", + "box":true, + "min":2 + }, "MaxWorkers":{ "type":"integer", "box":true, @@ -883,35 +904,40 @@ "Timestamp" ], "members":{ - "Dimensions":{ - "shape":"Dimensions", - "documentation":"

Internal only. The dimensions associated with the metric.

" - }, "MetricName":{ "shape":"String", "documentation":"

Internal only. The name of the metric.

" }, - "StatisticValues":{ - "shape":"StatisticSet", - "documentation":"

Internal only. The statistical values for the metric.

" - }, "Timestamp":{ "shape":"Timestamp", "documentation":"

Internal only. The time the metric data was received.

" }, - "Unit":{ - "shape":"Unit", - "documentation":"

Internal only. The unit used to store the metric.

" + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

Internal only. The dimensions associated with the metric.

" }, "Value":{ "shape":"Double", "documentation":"

Internal only. The value for the metric.

" + }, + "Unit":{ + "shape":"Unit", + "documentation":"

Internal only. The unit used to store the metric.

" + }, + "StatisticValues":{ + "shape":"StatisticSet", + "documentation":"

Internal only. The statistical values for the metric.

" } }, "documentation":"

Internal only. Collects Apache Airflow metrics. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

", "deprecated":true, "deprecatedMessage":"This type is for internal use and not meant for public use. Data set for this type will be ignored." }, + "MinWebservers":{ + "type":"integer", + "box":true, + "min":2 + }, "MinWorkers":{ "type":"integer", "box":true, @@ -920,10 +946,6 @@ "ModuleLoggingConfiguration":{ "type":"structure", "members":{ - "CloudWatchLogGroupArn":{ - "shape":"CloudWatchLogGroupArn", - "documentation":"

The Amazon Resource Name (ARN) for the CloudWatch Logs group where the Apache Airflow log type (e.g. DagProcessingLogs) is published. For example, arn:aws:logs:us-east-1:123456789012:log-group:airflow-MyMWAAEnvironment-MwaaEnvironment-DAGProcessing:*.

" - }, "Enabled":{ "shape":"LoggingEnabled", "documentation":"

Indicates whether the Apache Airflow log type (e.g. DagProcessingLogs) is enabled.

" @@ -931,6 +953,10 @@ "LogLevel":{ "shape":"LoggingLevel", "documentation":"

The Apache Airflow log level for the log type (e.g. DagProcessingLogs).

" + }, + "CloudWatchLogGroupArn":{ + "shape":"CloudWatchLogGroupArn", + "documentation":"

The Amazon Resource Name (ARN) for the CloudWatch Logs group where the Apache Airflow log type (e.g. DagProcessingLogs) is published. For example, arn:aws:logs:us-east-1:123456789012:log-group:airflow-MyMWAAEnvironment-MwaaEnvironment-DAGProcessing:*.

" } }, "documentation":"

Describes the Apache Airflow log details for the log type (e.g. DagProcessingLogs).

" @@ -956,13 +982,13 @@ "NetworkConfiguration":{ "type":"structure", "members":{ - "SecurityGroupIds":{ - "shape":"SecurityGroupList", - "documentation":"

A list of security group IDs. For more information, see Security in your VPC on Amazon MWAA.

" - }, "SubnetIds":{ "shape":"SubnetList", "documentation":"

A list of subnet IDs. For more information, see About networking on Amazon MWAA.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupList", + "documentation":"

A list of security group IDs. For more information, see Security in your VPC on Amazon MWAA.

" } }, "documentation":"

Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" @@ -1022,7 +1048,7 @@ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:s3:::[a-z0-9.\\-]+$" + "pattern":"arn:aws(-[a-z]+)?:s3:::[a-z0-9.\\-]+" }, "S3ObjectVersion":{ "type":"string", @@ -1038,7 +1064,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^sg-[a-zA-Z0-9\\-._]+$" + "pattern":"sg-[a-zA-Z0-9\\-._]+" }, "SecurityGroupList":{ "type":"list", @@ -1049,14 +1075,6 @@ "StatisticSet":{ "type":"structure", "members":{ - "Maximum":{ - "shape":"Double", - "documentation":"

Internal only. The maximum value of the sample set.

" - }, - "Minimum":{ - "shape":"Double", - "documentation":"

Internal only. The minimum value of the sample set.

" - }, "SampleCount":{ "shape":"Integer", "documentation":"

Internal only. The number of samples used for the statistic set.

" @@ -1064,6 +1082,14 @@ "Sum":{ "shape":"Double", "documentation":"

Internal only. The sum of values for the sample set.

" + }, + "Minimum":{ + "shape":"Double", + "documentation":"

Internal only. The minimum value of the sample set.

" + }, + "Maximum":{ + "shape":"Double", + "documentation":"

Internal only. The maximum value of the sample set.

" } }, "documentation":"

Internal only. Represents a set of statistics that describe a specific metric. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

", @@ -1075,7 +1101,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^subnet-[a-zA-Z0-9\\-._]+$" + "pattern":"subnet-[a-zA-Z0-9\\-._]+" }, "SubnetList":{ "type":"list", @@ -1087,7 +1113,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TagKeyList":{ "type":"list", @@ -1130,7 +1156,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "Timestamp":{"type":"timestamp"}, "Token":{ @@ -1200,87 +1226,95 @@ "type":"structure", "required":["Name"], "members":{ - "AirflowConfigurationOptions":{ - "shape":"AirflowConfigurationOptions", - "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

" - }, - "AirflowVersion":{ - "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2.

" - }, - "DagS3Path":{ - "shape":"RelativePath", - "documentation":"

The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

" - }, - "EnvironmentClass":{ - "shape":"EnvironmentClass", - "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of your Amazon MWAA environment. For example, MyMWAAEnvironment.

", + "location":"uri", + "locationName":"Name" }, "ExecutionRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role.

" }, - "LoggingConfiguration":{ - "shape":"LoggingConfigurationInput", - "documentation":"

The Apache Airflow log types to send to CloudWatch Logs.

" - }, - "MaxWorkers":{ - "shape":"MaxWorkers", - "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

" + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1.

" }, - "MinWorkers":{ - "shape":"MinWorkers", - "documentation":"

The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

" + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" }, - "Name":{ - "shape":"EnvironmentName", - "documentation":"

The name of your Amazon MWAA environment. For example, MyMWAAEnvironment.

", - "location":"uri", - "locationName":"Name" + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

" }, - "NetworkConfiguration":{ - "shape":"UpdateNetworkConfigurationInput", - "documentation":"

The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then the plugins.zip version is required. For more information, see Installing custom plugins.

" }, "PluginsS3ObjectVersion":{ "shape":"S3ObjectVersion", "documentation":"

The version of the plugins.zip file on your Amazon S3 bucket. You must specify a version each time a plugins.zip file is updated. For more information, see How S3 Versioning works.

" }, - "PluginsS3Path":{ + "RequirementsS3Path":{ "shape":"RelativePath", - "documentation":"

The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then the plugins.zip version is required. For more information, see Installing custom plugins.

" + "documentation":"

The relative path to the requirements.txt file on your Amazon S3 bucket. For example, requirements.txt. If specified, then a file version is required. For more information, see Installing Python dependencies.

" }, "RequirementsS3ObjectVersion":{ "shape":"S3ObjectVersion", "documentation":"

The version of the requirements.txt file on your Amazon S3 bucket. You must specify a version each time a requirements.txt file is updated. For more information, see How S3 Versioning works.

" }, - "RequirementsS3Path":{ + "StartupScriptS3Path":{ "shape":"RelativePath", - "documentation":"

The relative path to the requirements.txt file on your Amazon S3 bucket. For example, requirements.txt. If specified, then a file version is required. For more information, see Installing Python dependencies.

" - }, - "Schedulers":{ - "shape":"Schedulers", - "documentation":"

The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

" - }, - "SourceBucketArn":{ - "shape":"S3BucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

" + "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" }, "StartupScriptS3ObjectVersion":{ "shape":"S3ObjectVersion", "documentation":"

The version of the startup shell script in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file every time you update the script.

Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

For more information, see Using a startup script.

" }, - "StartupScriptS3Path":{ - "shape":"RelativePath", - "documentation":"

The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

" + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

" }, - "WebserverAccessMode":{ - "shape":"WebserverAccessMode", - "documentation":"

The Apache Airflow Web server access mode. For more information, see Apache Airflow access modes.

" + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

" + }, + "NetworkConfiguration":{ + "shape":"UpdateNetworkConfigurationInput", + "documentation":"

The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

The Apache Airflow log types to send to CloudWatch Logs.

" }, "WeeklyMaintenanceWindowStart":{ "shape":"WeeklyMaintenanceWindowStart", "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

" + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

The Apache Airflow Web server access mode. For more information, see Apache Airflow access modes.

" + }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

" + }, + "Schedulers":{ + "shape":"Schedulers", + "documentation":"

The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

" + }, + "MinWebservers":{ + "shape":"MinWebservers", + "documentation":"

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" + }, + "MaxWebservers":{ + "shape":"MaxWebservers", + "documentation":"

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

Valid values: Accepts between 2 and 5. Defaults to 2.

" } } }, @@ -1322,7 +1356,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^.+$" + "pattern":".+" }, "UpdateStatus":{ "type":"string", @@ -1348,7 +1382,7 @@ "type":"string", "max":1224, "min":1, - "pattern":"^([a-z.-]+)?com\\.amazonaws\\.vpce\\.[a-z0-9\\-]+\\.[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"([a-z.-]+)?com\\.amazonaws\\.vpce\\.[a-z0-9\\-]+\\.[a-zA-Z_0-9+=,.@\\-_/]+" }, "WebserverAccessMode":{ "type":"string", @@ -1361,13 +1395,13 @@ "type":"string", "max":256, "min":1, - "pattern":"^https://.+$" + "pattern":"https://.+" }, "WeeklyMaintenanceWindowStart":{ "type":"string", "max":9, "min":1, - "pattern":"(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30)" + "pattern":".*(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30).*" } }, "documentation":"

Amazon Managed Workflows for Apache Airflow

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?.

Endpoints

Regions

For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference.

" diff --git a/botocore/data/neptune-graph/2023-11-29/service-2.json b/botocore/data/neptune-graph/2023-11-29/service-2.json index 58f09b07ad..a6d97e651c 100644 --- a/botocore/data/neptune-graph/2023-11-29/service-2.json +++ b/botocore/data/neptune-graph/2023-11-29/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-11-29", + "auth":["aws.auth#sigv4"], "endpointPrefix":"neptune-graph", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "ripServiceName":"neptune-graph", "serviceAbbreviation":"Neptune Graph", "serviceFullName":"Amazon Neptune Graph", @@ -634,6 +635,10 @@ "min":1, "pattern":"arn:.+" }, + "BlankNodeHandling":{ + "type":"string", + "enum":["convertToIri"] + }, "Boolean":{ "type":"boolean", "box":true @@ -961,6 +966,10 @@ "shape":"Format", "documentation":"

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format.

" }, + "blankNodeHandling":{ + "shape":"BlankNodeHandling", + "documentation":"

The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. For more information, see Handling RDF values.

" + }, "roleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the IAM role that will allow access to the data that is to be imported.

" @@ -990,7 +999,7 @@ }, "format":{ "shape":"Format", - "documentation":"

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format.

" + "documentation":"

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format, OPENCYPHER, which identifies the openCypher load format, or ntriples, which identifies the RDF n-triples format.

" }, "roleArn":{ "shape":"RoleArn", @@ -1343,7 +1352,8 @@ "type":"string", "enum":[ "CSV", - "OPEN_CYPHER" + "OPEN_CYPHER", + "NTRIPLES" ] }, "GetGraphInput":{ @@ -2342,7 +2352,7 @@ "type":"integer", "box":true, "max":24576, - "min":128 + "min":32 }, "QueryLanguage":{ "type":"string", @@ -2700,6 +2710,10 @@ "shape":"Format", "documentation":"

Specifies the format of Amazon S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format.

" }, + "blankNodeHandling":{ + "shape":"BlankNodeHandling", + "documentation":"

The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. For more information, see Handling RDF values.

" + }, "graphIdentifier":{ "shape":"GraphIdentifier", "documentation":"

The unique identifier of the Neptune Analytics graph.

", diff --git a/botocore/data/network-firewall/2020-11-12/service-2.json b/botocore/data/network-firewall/2020-11-12/service-2.json index 01fa19ed0c..9b38531ba3 100644 --- a/botocore/data/network-firewall/2020-11-12/service-2.json +++ b/botocore/data/network-firewall/2020-11-12/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"network-firewall", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Network Firewall", "serviceFullName":"AWS Network Firewall", "serviceId":"Network Firewall", "signatureVersion":"v4", "signingName":"network-firewall", "targetPrefix":"NetworkFirewall_20201112", - "uid":"network-firewall-2020-11-12" + "uid":"network-firewall-2020-11-12", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFirewallPolicy":{ @@ -118,7 +120,7 @@ {"shape":"LimitExceededException"}, {"shape":"InsufficientCapacityException"} ], - "documentation":"

Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains Certificate Manager certificate associations between and the scope configurations that Network Firewall uses to decrypt and re-encrypt traffic traveling through your firewall.

After you create a TLS inspection configuration, you can associate it with a new firewall policy.

To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

" + "documentation":"

Creates a Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall.

To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

" }, "DeleteFirewall":{ "name":"DeleteFirewall", @@ -2157,18 +2159,18 @@ "members":{ "LogType":{ "shape":"LogType", - "documentation":"

The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.

" + "documentation":"

The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.

  • ALERT - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see StatefulRule.

  • FLOW - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.

  • TLS - Logs for events that are related to TLS inspection. For more information, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

" }, "LogDestinationType":{ "shape":"LogDestinationType", - "documentation":"

The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

" + "documentation":"

The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.

" }, "LogDestination":{ "shape":"LogDestinationMap", - "documentation":"

The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.

  • For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix. The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts:

    \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }

  • For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. The following example specifies a log group named alert-log-group:

    \"LogDestination\": { \"logGroup\": \"alert-log-group\" }

  • For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream:

    \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }

" + "documentation":"

The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.

  • For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix.

    The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts:

    \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }

  • For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. The following example specifies a log group named alert-log-group:

    \"LogDestination\": { \"logGroup\": \"alert-log-group\" }

  • For a Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream:

    \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }

" } }, - "documentation":"

Defines where Network Firewall sends logs for the firewall for one log type. This is used in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

Network Firewall generates logs for stateful rule groups. You can save alert and flow log types. The stateful rules engine records flow logs for all network traffic that it receives. It records alert logs for traffic that matches stateful rules that have the rule action set to DROP or ALERT.

" + "documentation":"

Defines where Network Firewall sends logs for the firewall for one log type. This is used in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.

Network Firewall generates logs for stateful rule groups. You can save alert, flow, and TLS log types.

" }, "LogDestinationConfigs":{ "type":"list", @@ -2202,7 +2204,8 @@ "type":"string", "enum":[ "ALERT", - "FLOW" + "FLOW", + "TLS" ] }, "LoggingConfiguration":{ @@ -2848,7 +2851,7 @@ "members":{ "Action":{ "shape":"StatefulAction", - "documentation":"

Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

The actions for a stateful rule are defined as follows:

  • PASS - Permits the packets to go to the intended destination.

  • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

  • ALERT - Sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

" + "documentation":"

Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

The actions for a stateful rule are defined as follows:

  • PASS - Permits the packets to go to the intended destination.

  • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

  • ALERT - Sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

  • REJECT - Drops traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and an RST bit contained in the TCP header flags. REJECT is available only for TCP traffic. This option doesn't support FTP or IMAP protocols.

" }, "Header":{ "shape":"Header", diff --git a/botocore/data/networkmanager/2019-07-05/service-2.json b/botocore/data/networkmanager/2019-07-05/service-2.json index 428a316270..70025bd73a 100644 --- a/botocore/data/networkmanager/2019-07-05/service-2.json +++ b/botocore/data/networkmanager/2019-07-05/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"networkmanager", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"NetworkManager", "serviceFullName":"AWS Network Manager", "serviceId":"NetworkManager", "signatureVersion":"v4", "signingName":"networkmanager", - "uid":"networkmanager-2019-07-05" + "uid":"networkmanager-2019-07-05", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptAttachment":{ @@ -1777,6 +1779,10 @@ "shape":"ConstrainedString", "documentation":"

The name of the segment attachment.

" }, + "NetworkFunctionGroupName":{ + "shape":"NetworkFunctionGroupName", + "documentation":"

The name of the network function group.

" + }, "Tags":{ "shape":"TagList", "documentation":"

The tags associated with the attachment.

" @@ -1785,6 +1791,10 @@ "shape":"ProposedSegmentChange", "documentation":"

The attachment to move from one segment to another.

" }, + "ProposedNetworkFunctionGroupChange":{ + "shape":"ProposedNetworkFunctionGroupChange", + "documentation":"

Describes a proposed change to a network function group associated with the attachment.

" + }, "CreatedAt":{ "shape":"DateTime", "documentation":"

The timestamp when the attachment was created.

" @@ -1792,10 +1802,55 @@ "UpdatedAt":{ "shape":"DateTime", "documentation":"

The timestamp when the attachment was last updated.

" + }, + "LastModificationErrors":{ + "shape":"AttachmentErrorList", + "documentation":"

Describes the error associated with the attachment request.

" } }, "documentation":"

Describes a core network attachment.

" }, + "AttachmentError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"AttachmentErrorCode", + "documentation":"

The error code for the attachment request.

" + }, + "Message":{ + "shape":"ServerSideString", + "documentation":"

The message associated with the error code.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the requested attachment resource.

" + }, + "RequestId":{ + "shape":"ServerSideString", + "documentation":"

The ID of the attachment request.

" + } + }, + "documentation":"

Describes the error associated with an attachment request.

" + }, + "AttachmentErrorCode":{ + "type":"string", + "enum":[ + "VPC_NOT_FOUND", + "SUBNET_NOT_FOUND", + "SUBNET_DUPLICATED_IN_AVAILABILITY_ZONE", + "SUBNET_NO_FREE_ADDRESSES", + "SUBNET_UNSUPPORTED_AVAILABILITY_ZONE", + "SUBNET_NO_IPV6_CIDRS", + "VPN_CONNECTION_NOT_FOUND", + "MAXIMUM_NO_ENCAP_LIMIT_EXCEEDED" + ] + }, + "AttachmentErrorList":{ + "type":"list", + "member":{"shape":"AttachmentError"}, + "max":20, + "min":0 + }, "AttachmentId":{ "type":"string", "max":50, @@ -1886,6 +1941,7 @@ "type":"string", "enum":[ "CORE_NETWORK_SEGMENT", + "NETWORK_FUNCTION_GROUP", "CORE_NETWORK_EDGE", "ATTACHMENT_MAPPING", "ATTACHMENT_ROUTE_PROPAGATION", @@ -1989,7 +2045,11 @@ }, "SubnetArn":{ "shape":"SubnetArn", - "documentation":"

The subnet ARN for the Connect peer.

" + "documentation":"

The subnet ARN for the Connect peer. This only applies when the protocol is NO_ENCAP.

" + }, + "LastModificationErrors":{ + "shape":"ConnectPeerErrorList", + "documentation":"

Describes the error associated with the attachment request.

" } }, "documentation":"

Describes a core network Connect peer.

" @@ -2085,6 +2145,45 @@ }, "documentation":"

Describes a core network Connect peer configuration.

" }, + "ConnectPeerError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"ConnectPeerErrorCode", + "documentation":"

The error code for the Connect peer request.

" + }, + "Message":{ + "shape":"ServerSideString", + "documentation":"

The message associated with the error code.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the requested Connect peer resource.

" + }, + "RequestId":{ + "shape":"ServerSideString", + "documentation":"

The ID of the Connect peer request.

" + } + }, + "documentation":"

Describes an error associated with a Connect peer request.

" + }, + "ConnectPeerErrorCode":{ + "type":"string", + "enum":[ + "EDGE_LOCATION_NO_FREE_IPS", + "EDGE_LOCATION_PEER_DUPLICATE", + "SUBNET_NOT_FOUND", + "IP_OUTSIDE_SUBNET_CIDR_RANGE", + "INVALID_INSIDE_CIDR_BLOCK", + "NO_ASSOCIATED_CIDR_BLOCK" + ] + }, + "ConnectPeerErrorList":{ + "type":"list", + "member":{"shape":"ConnectPeerError"}, + "max":20, + "min":0 + }, "ConnectPeerId":{ "type":"string", "max":50, @@ -2298,6 +2397,10 @@ "shape":"CoreNetworkSegmentList", "documentation":"

The segments within a core network.

" }, + "NetworkFunctionGroups":{ + "shape":"CoreNetworkNetworkFunctionGroupList", + "documentation":"

The network function groups associated with a core network.

" + }, "Edges":{ "shape":"CoreNetworkEdgeList", "documentation":"

The edges within a core network.

" @@ -2390,6 +2493,10 @@ "shape":"ConstrainedString", "documentation":"

The segment name if the change event is associated with a segment.

" }, + "NetworkFunctionGroupName":{ + "shape":"ConstrainedString", + "documentation":"

The changed network function group name.

" + }, "AttachmentId":{ "shape":"AttachmentId", "documentation":"

The ID of the attachment if the change event is associated with an attachment.

" @@ -2412,6 +2519,10 @@ "shape":"ConstrainedString", "documentation":"

The names of the segments in a core network.

" }, + "NetworkFunctionGroupName":{ + "shape":"ConstrainedString", + "documentation":"

The network function group name if the change event is associated with a network function group.

" + }, "EdgeLocations":{ "shape":"ExternalRegionCodeList", "documentation":"

The Regions where edges are located in a core network.

" @@ -2435,6 +2546,10 @@ "SharedSegments":{ "shape":"ConstrainedStringList", "documentation":"

The shared segments for a core network change value.

" + }, + "ServiceInsertionActions":{ + "shape":"ServiceInsertionActionList", + "documentation":"

Describes the service insertion action.

" } }, "documentation":"

Describes a core network change.

" @@ -2467,6 +2582,46 @@ "min":0, "pattern":"^core-network-([0-9a-f]{8,17})$" }, + "CoreNetworkNetworkFunctionGroup":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ConstrainedString", + "documentation":"

The name of the network function group.

" + }, + "EdgeLocations":{ + "shape":"ExternalRegionCodeList", + "documentation":"

The core network edge locations.

" + }, + "Segments":{ + "shape":"ServiceInsertionSegments", + "documentation":"

The segments associated with the network function group.

" + } + }, + "documentation":"

Describes a network function group.

" + }, + "CoreNetworkNetworkFunctionGroupIdentifier":{ + "type":"structure", + "members":{ + "CoreNetworkId":{ + "shape":"CoreNetworkId", + "documentation":"

The ID of the core network.

" + }, + "NetworkFunctionGroupName":{ + "shape":"ConstrainedString", + "documentation":"

The network function group name.

" + }, + "EdgeLocation":{ + "shape":"ExternalRegionCode", + "documentation":"

The location for the core network edge.

" + } + }, + "documentation":"

Describes the identifier of a network function group associated with a core network.

" + }, + "CoreNetworkNetworkFunctionGroupList":{ + "type":"list", + "member":{"shape":"CoreNetworkNetworkFunctionGroup"} + }, "CoreNetworkPolicy":{ "type":"structure", "members":{ @@ -2738,7 +2893,7 @@ }, "CoreNetworkAddress":{ "shape":"IPAddress", - "documentation":"

A Connect peer core network address.

" + "documentation":"

A Connect peer core network address. This only applies when the protocol is GRE.

" }, "PeerAddress":{ "shape":"IPAddress", @@ -2746,7 +2901,7 @@ }, "BgpOptions":{ "shape":"BgpOptions", - "documentation":"

The Connect peer BGP options.

" + "documentation":"

The Connect peer BGP options. This only applies when the protocol is GRE.

" }, "InsideCidrBlocks":{ "shape":"ConstrainedStringList", @@ -2763,7 +2918,7 @@ }, "SubnetArn":{ "shape":"SubnetArn", - "documentation":"

The subnet ARN for the Connect peer.

" + "documentation":"

The subnet ARN for the Connect peer. This only applies when the protocol is NO_ENCAP.

" } } }, @@ -3777,6 +3932,28 @@ } } }, + "EdgeOverride":{ + "type":"structure", + "members":{ + "EdgeSets":{ + "shape":"EdgeSetList", + "documentation":"

The list of edge locations.

" + }, + "UseEdge":{ + "shape":"ConstrainedString", + "documentation":"

The edge that should be used when overriding the current edge order.

" + } + }, + "documentation":"

Describes the edge that's used for the override.

" + }, + "EdgeSet":{ + "type":"list", + "member":{"shape":"ConstrainedString"} + }, + "EdgeSetList":{ + "type":"list", + "member":{"shape":"EdgeSet"} + }, "ExceptionContextKey":{"type":"string"}, "ExceptionContextMap":{ "type":"map", @@ -4333,7 +4510,7 @@ }, "ResourceType":{ "shape":"ConstrainedString", - "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • connection

  • device

  • link

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", + "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • attachment

  • connect-peer

  • connection

  • core-network

  • device

  • link

  • peering

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", "location":"querystring", "locationName":"resourceType" }, @@ -4400,7 +4577,7 @@ }, "ResourceType":{ "shape":"ConstrainedString", - "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • connection

  • device

  • link

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", + "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • attachment

  • connect-peer

  • connection

  • core-network

  • device

  • link

  • peering

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", "location":"querystring", "locationName":"resourceType" }, @@ -4473,7 +4650,7 @@ }, "ResourceType":{ "shape":"ConstrainedString", - "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

The following are the supported resource types for Network Manager:

  • connection - The definition model is Connection.

  • device - The definition model is Device.

  • link - The definition model is Link.

  • site - The definition model is Site.

The following are the supported resource types for Amazon VPC:

", + "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • attachment

  • connect-peer

  • connection

  • core-network

  • device

  • link

  • peering

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", "location":"querystring", "locationName":"resourceType" }, @@ -4622,7 +4799,7 @@ }, "ResourceType":{ "shape":"ConstrainedString", - "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • connection

  • device

  • link

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

", + "documentation":"

The resource type. The following are the supported resource types:

  • connect-peer

  • transit-gateway-connect-peer

  • vpn-connection

", "location":"querystring", "locationName":"resourceType" }, @@ -5414,6 +5591,24 @@ "max":500, "min":1 }, + "NetworkFunctionGroup":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ConstrainedString", + "documentation":"

The name of the network function group.

" + } + }, + "documentation":"

Describes a network function group for service insertion.

" + }, + "NetworkFunctionGroupList":{ + "type":"list", + "member":{"shape":"NetworkFunctionGroup"} + }, + "NetworkFunctionGroupName":{ + "type":"string", + "pattern":"[\\s\\S]*" + }, "NetworkResource":{ "type":"structure", "members":{ @@ -5435,7 +5630,7 @@ }, "ResourceType":{ "shape":"ConstrainedString", - "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • connection

  • device

  • link

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

" + "documentation":"

The resource type.

The following are the supported resource types for Direct Connect:

  • dxcon

  • dx-gateway

  • dx-vif

The following are the supported resource types for Network Manager:

  • attachment

  • connect-peer

  • connection

  • core-network

  • device

  • link

  • peering

  • site

The following are the supported resource types for Amazon VPC:

  • customer-gateway

  • transit-gateway

  • transit-gateway-attachment

  • transit-gateway-connect-peer

  • transit-gateway-route-table

  • vpn-connection

" }, "ResourceId":{ "shape":"ConstrainedString", @@ -5562,6 +5757,10 @@ "shape":"ConstrainedString", "documentation":"

The name of the segment.

" }, + "NetworkFunctionGroupName":{ + "shape":"ConstrainedString", + "documentation":"

The network function group name associated with the destination.

" + }, "EdgeLocation":{ "shape":"ExternalRegionCode", "documentation":"

The edge location for the network destination.

" @@ -5734,10 +5933,57 @@ "CreatedAt":{ "shape":"DateTime", "documentation":"

The timestamp when the attachment peer was created.

" + }, + "LastModificationErrors":{ + "shape":"PeeringErrorList", + "documentation":"

Describes the error associated with the Connect peer request.

" } }, "documentation":"

Describes a peering connection.

" }, + "PeeringError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"PeeringErrorCode", + "documentation":"

The error code for the peering request.

" + }, + "Message":{ + "shape":"ServerSideString", + "documentation":"

The message associated with the error code.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the requested peering resource.

" + }, + "RequestId":{ + "shape":"ServerSideString", + "documentation":"

The ID of the Peering request.

" + }, + "MissingPermissionsContext":{ + "shape":"PermissionsErrorContext", + "documentation":"

Provides additional information about missing permissions for the peering error.

" + } + }, + "documentation":"

Describes an error associated with a peering request.

" + }, + "PeeringErrorCode":{ + "type":"string", + "enum":[ + "TRANSIT_GATEWAY_NOT_FOUND", + "TRANSIT_GATEWAY_PEERS_LIMIT_EXCEEDED", + "MISSING_PERMISSIONS", + "INTERNAL_ERROR", + "EDGE_LOCATION_PEER_DUPLICATE", + "INVALID_TRANSIT_GATEWAY_STATE" + ] + }, + "PeeringErrorList":{ + "type":"list", + "member":{"shape":"PeeringError"}, + "max":20, + "min":0 + }, "PeeringId":{ "type":"string", "max":50, @@ -5761,6 +6007,34 @@ "type":"string", "enum":["TRANSIT_GATEWAY"] }, + "PermissionsErrorContext":{ + "type":"structure", + "members":{ + "MissingPermission":{ + "shape":"ServerSideString", + "documentation":"

The missing permissions.

" + } + }, + "documentation":"

Describes additional information about missing permissions.

" + }, + "ProposedNetworkFunctionGroupChange":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The list of proposed changes to the key-value tags associated with the network function group.

" + }, + "AttachmentPolicyRuleNumber":{ + "shape":"Integer", + "documentation":"

The proposed new attachment policy rule number for the network function group.

" + }, + "NetworkFunctionGroupName":{ + "shape":"ConstrainedString", + "documentation":"

The proposed name change for the network function group name.

" + } + }, + "documentation":"

Describes proposed changes to a network function group.

" + }, "ProposedSegmentChange":{ "type":"structure", "members":{ @@ -6165,6 +6439,10 @@ "CoreNetworkSegmentEdge":{ "shape":"CoreNetworkSegmentEdgeIdentifier", "documentation":"

The segment edge in a core network.

" + }, + "CoreNetworkNetworkFunctionGroup":{ + "shape":"CoreNetworkNetworkFunctionGroupIdentifier", + "documentation":"

The route table identifier associated with the network function group.

" } }, "documentation":"

Describes a route table.

" @@ -6173,7 +6451,8 @@ "type":"string", "enum":[ "TRANSIT_GATEWAY_ROUTE_TABLE", - "CORE_NETWORK_SEGMENT" + "CORE_NETWORK_SEGMENT", + "NETWORK_FUNCTION_GROUP" ] }, "RouteType":{ @@ -6192,12 +6471,66 @@ "max":50, "min":0 }, + "SegmentActionServiceInsertion":{ + "type":"string", + "enum":[ + "send-via", + "send-to" + ] + }, + "SendViaMode":{ + "type":"string", + "enum":[ + "dual-hop", + "single-hop" + ] + }, "ServerSideString":{ "type":"string", "max":10000000, "min":0, "pattern":"[\\s\\S]*" }, + "ServiceInsertionAction":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"SegmentActionServiceInsertion", + "documentation":"

The action the service insertion takes for traffic. send-via sends east-west traffic between attachments. send-to sends north-south traffic to the security appliance, and then from that to either the Internet or to an on-premises location.

" + }, + "Mode":{ + "shape":"SendViaMode", + "documentation":"

Describes the mode packets take for the send-via action. This is not used when the action is send-to. dual-hop packets traverse attachments in both the source and destination core network edges. This mode requires that an inspection attachment must be present in all Regions of the service insertion-enabled segments. For single-hop, packets traverse a single intermediate inserted attachment. You can use EdgeOverride to specify a specific edge to use.

" + }, + "WhenSentTo":{ + "shape":"WhenSentTo", + "documentation":"

The list of destination segments if the service insertion action is send-to.

" + }, + "Via":{ + "shape":"Via", + "documentation":"

The list of network function groups and any edge overrides for the chosen service insertion action. Used for both send-to or send-via.

" + } + }, + "documentation":"

Describes the action that the service insertion will take for any segments associated with it.

" + }, + "ServiceInsertionActionList":{ + "type":"list", + "member":{"shape":"ServiceInsertionAction"} + }, + "ServiceInsertionSegments":{ + "type":"structure", + "members":{ + "SendVia":{ + "shape":"ConstrainedStringList", + "documentation":"

The list of segments associated with the send-via action.

" + }, + "SendTo":{ + "shape":"ConstrainedStringList", + "documentation":"

The list of segments associated with the send-to action.

" + } + }, + "documentation":"

Describes the segments associated with the service insertion action.

" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -7007,6 +7340,20 @@ "Other" ] }, + "Via":{ + "type":"structure", + "members":{ + "NetworkFunctionGroups":{ + "shape":"NetworkFunctionGroupList", + "documentation":"

The list of network function groups associated with the service insertion action.

" + }, + "WithEdgeOverrides":{ + "shape":"WithEdgeOverridesList", + "documentation":"

Describes any edge overrides. An edge override is a specific edge to be used for traffic.

" + } + }, + "documentation":"

The list of network function groups and edge overrides for the service insertion action. Used for both the send-to and send-via actions.

" + }, "VpcArn":{ "type":"string", "max":500, @@ -7050,6 +7397,24 @@ "max":500, "min":0, "pattern":"^arn:[^:]{1,63}:ec2:[^:]{0,63}:[^:]{0,63}:vpn-connection\\/vpn-[0-9a-f]{8,17}$" + }, + "WhenSentTo":{ + "type":"structure", + "members":{ + "WhenSentToSegmentsList":{ + "shape":"WhenSentToSegmentsList", + "documentation":"

The list of destination segments when the service insertion action is send-to.

" + } + }, + "documentation":"

Displays a list of the destination segments. Used only when the service insertion action is send-to.

" + }, + "WhenSentToSegmentsList":{ + "type":"list", + "member":{"shape":"ConstrainedString"} + }, + "WithEdgeOverridesList":{ + "type":"list", + "member":{"shape":"EdgeOverride"} } }, "documentation":"

Amazon Web Services enables you to centrally manage your Amazon Web Services Cloud WAN core network and your Transit Gateway network across Amazon Web Services accounts, Regions, and on-premises locations.

" diff --git a/botocore/data/opensearch/2021-01-01/service-2.json b/botocore/data/opensearch/2021-01-01/service-2.json index 9e442ff3ea..733048f4c9 100644 --- a/botocore/data/opensearch/2021-01-01/service-2.json +++ b/botocore/data/opensearch/2021-01-01/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2021-01-01", "endpointPrefix":"es", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon OpenSearch Service", "serviceId":"OpenSearch", "signatureVersion":"v4", - "uid":"opensearch-2021-01-01" + "uid":"opensearch-2021-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptInboundConnection":{ @@ -1061,6 +1063,37 @@ } }, "shapes":{ + "AIMLOptionsInput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{ + "shape":"NaturalLanguageQueryGenerationOptionsInput", + "documentation":"

Container for parameters required for natural language query generation on the specified domain.

" + } + }, + "documentation":"

Container for parameters required to enable all machine learning features.

" + }, + "AIMLOptionsOutput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{ + "shape":"NaturalLanguageQueryGenerationOptionsOutput", + "documentation":"

Container for parameters required for natural language query generation on the specified domain.

" + } + }, + "documentation":"

Container for parameters representing the state of machine learning features on the specified domain.

" + }, + "AIMLOptionsStatus":{ + "type":"structure", + "members":{ + "Options":{ + "shape":"AIMLOptionsOutput", + "documentation":"

Machine learning options on the specified domain.

" + }, + "Status":{"shape":"OptionStatus"} + }, + "documentation":"

The status of machine learning options on the specified domain.

" + }, "ARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities in Using Amazon Web Services Identity and Access Management for more information.

", @@ -1281,6 +1314,10 @@ "shape":"SAMLOptionsOutput", "documentation":"

Container for information about the SAML configuration for OpenSearch Dashboards.

" }, + "JWTOptions":{ + "shape":"JWTOptionsOutput", + "documentation":"

Container for information about the JWT configuration of the Amazon OpenSearch Service.

" + }, "AnonymousAuthDisableDate":{ "shape":"DisableTimestamp", "documentation":"

Date and time when the migration period will be disabled. Only necessary when enabling fine-grained access control on an existing domain.

" @@ -1311,6 +1348,10 @@ "shape":"SAMLOptionsInput", "documentation":"

Container for information about the SAML configuration for OpenSearch Dashboards.

" }, + "JWTOptions":{ + "shape":"JWTOptionsInput", + "documentation":"

Container for information about the JWT configuration of the Amazon OpenSearch Service.

" + }, "AnonymousAuthEnabled":{ "shape":"Boolean", "documentation":"

True to enable a 30-day migration period during which administrators can create role mappings. Only necessary when enabling fine-grained access control on an existing domain.

" @@ -2145,6 +2186,10 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptions", "documentation":"

Software update options for the domain.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsInput", + "documentation":"

Options for all machine learning features for the specified domain.

" } } }, @@ -2321,6 +2366,10 @@ "Description":{ "shape":"DataSourceDescription", "documentation":"

A description of the data source.

" + }, + "Status":{ + "shape":"DataSourceStatus", + "documentation":"

The status of the data source.

" } }, "documentation":"

Details about a direct-query data source.

" @@ -2335,6 +2384,13 @@ "min":3, "pattern":"[a-z][a-z0-9_]+" }, + "DataSourceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DISABLED" + ] + }, "DataSourceType":{ "type":"structure", "members":{ @@ -3186,6 +3242,10 @@ "ModifyingProperties":{ "shape":"ModifyingPropertiesList", "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsStatus", + "documentation":"

Container for parameters required to enable all machine learning features.

" } }, "documentation":"

Container for the configuration of an OpenSearch Service domain.

" @@ -3500,7 +3560,7 @@ }, "DomainEndpointV2HostedZoneId":{ "shape":"HostedZoneId", - "documentation":"

The DualStack Hosted Zone Id for the domain.

" + "documentation":"

The dual stack hosted zone ID for the domain.

" }, "Processing":{ "shape":"Boolean", @@ -3593,6 +3653,10 @@ "ModifyingProperties":{ "shape":"ModifyingPropertiesList", "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsOutput", + "documentation":"

Container for parameters required to enable all machine learning features.

" } }, "documentation":"

The current status of an OpenSearch Service domain.

" @@ -3889,6 +3953,10 @@ "Description":{ "shape":"DataSourceDescription", "documentation":"

A description of the data source.

" + }, + "Status":{ + "shape":"DataSourceStatus", + "documentation":"

The status of the data source.

" } }, "documentation":"

The result of a GetDataSource operation.

" @@ -4264,6 +4332,50 @@ "type":"list", "member":{"shape":"Issue"} }, + "JWTOptionsInput":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

True to enable JWT authentication and authorization for a domain.

" + }, + "SubjectKey":{ + "shape":"SubjectKey", + "documentation":"

Element of the JWT assertion to use for the user name.

" + }, + "RolesKey":{ + "shape":"RolesKey", + "documentation":"

Element of the JWT assertion to use for roles.

" + }, + "PublicKey":{ + "shape":"String", + "documentation":"

Element of the JWT assertion used by the cluster to verify JWT signatures.

" + } + }, + "documentation":"

The JWT authentication and authorization configuration for an Amazon OpenSearch Service domain.

" + }, + "JWTOptionsOutput":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

True if JWT use is enabled.

" + }, + "SubjectKey":{ + "shape":"String", + "documentation":"

The key used for matching the JWT subject attribute.

" + }, + "RolesKey":{ + "shape":"String", + "documentation":"

The key used for matching the JWT roles attribute.

" + }, + "PublicKey":{ + "shape":"String", + "documentation":"

The key used to verify the signature of incoming JWT requests.

" + } + }, + "documentation":"

Describes the JWT options configured for the domain.

" + }, "KmsKeyId":{ "type":"string", "max":500, @@ -4865,6 +4977,49 @@ "type":"list", "member":{"shape":"ModifyingProperties"} }, + "NaturalLanguageQueryGenerationCurrentState":{ + "type":"string", + "enum":[ + "NOT_ENABLED", + "ENABLE_COMPLETE", + "ENABLE_IN_PROGRESS", + "ENABLE_FAILED", + "DISABLE_COMPLETE", + "DISABLE_IN_PROGRESS", + "DISABLE_FAILED" + ] + }, + "NaturalLanguageQueryGenerationDesiredState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "NaturalLanguageQueryGenerationOptionsInput":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"NaturalLanguageQueryGenerationDesiredState", + "documentation":"

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + }, + "documentation":"

Container for parameters required to enable the natural language query generation feature.

" + }, + "NaturalLanguageQueryGenerationOptionsOutput":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"NaturalLanguageQueryGenerationDesiredState", + "documentation":"

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + }, + "CurrentState":{ + "shape":"NaturalLanguageQueryGenerationCurrentState", + "documentation":"

The current state of the natural language query generation feature, indicating completion, in progress, or failure.

" + } + }, + "documentation":"

Container for parameters representing the state of the natural language query generation feature on the specified domain.

" + }, "NextToken":{ "type":"string", "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" @@ -5688,6 +5843,11 @@ "min":20, "pattern":"arn:(aws|aws\\-cn|aws\\-us\\-gov|aws\\-iso|aws\\-iso\\-b):iam::[0-9]+:role\\/.*" }, + "RolesKey":{ + "type":"string", + "max":64, + "min":1 + }, "RollbackOnDisable":{ "type":"string", "documentation":"

The rollback state while disabling Auto-Tune for the domain.

", @@ -6160,6 +6320,11 @@ "type":"list", "member":{"shape":"String"} }, + "SubjectKey":{ + "type":"string", + "max":64, + "min":1 + }, "TLSSecurityPolicy":{ "type":"string", "enum":[ @@ -6243,6 +6408,10 @@ "Description":{ "shape":"DataSourceDescription", "documentation":"

A new description of the data source.

" + }, + "Status":{ + "shape":"DataSourceStatus", + "documentation":"

The status of the data source update.

" } }, "documentation":"

Container for the parameters to the UpdateDataSource operation.

" @@ -6338,6 +6507,10 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptions", "documentation":"

Service software update options for the domain.

" + }, + "AIMLOptions":{ + "shape":"AIMLOptionsInput", + "documentation":"

Options for all machine learning features for the specified domain.

" } }, "documentation":"

Container for the request parameters to the UpdateDomain operation.

" diff --git a/botocore/data/opsworks/2013-02-18/endpoint-rule-set-1.json b/botocore/data/opsworks/2013-02-18/endpoint-rule-set-1.json index 3c6f5315cd..3d8ec45721 100644 --- a/botocore/data/opsworks/2013-02-18/endpoint-rule-set-1.json +++ b/botocore/data/opsworks/2013-02-18/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], 
+ "type": "tree" }, { "conditions": [], diff --git a/botocore/data/opsworks/2013-02-18/service-2.json b/botocore/data/opsworks/2013-02-18/service-2.json index 68e9434a8d..72dfc5e47c 100644 --- a/botocore/data/opsworks/2013-02-18/service-2.json +++ b/botocore/data/opsworks/2013-02-18/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"opsworks", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS OpsWorks", "serviceId":"OpsWorks", "signatureVersion":"v4", @@ -23,7 +24,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Assign a registered instance to a layer.

  • You can assign registered on-premises instances to any layer type.

  • You can assign registered Amazon EC2 instances only to custom layers.

  • You cannot use this action with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Assign a registered instance to a layer.

  • You can assign registered on-premises instances to any layer type.

  • You can assign registered Amazon EC2 instances only to custom layers.

  • You cannot use this action with instances that were created with OpsWorks Stacks.

Required Permissions: To use this action, an Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "AssignVolume":{ "name":"AssignVolume", @@ -62,7 +63,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing.

You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Attaches an Elastic Load Balancing load balancer to a specified layer. OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with OpsWorks Stacks. For more information, see Elastic Load Balancing.

You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see the Elastic Load Balancing Developer Guide.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "CloneStack":{ "name":"CloneStack", @@ -249,7 +250,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Deregisters a specified Elastic IP address. The address can be registered by another stack after it is deregistered. For more information, see Resource Management.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "DeregisterInstance":{ "name":"DeregisterInstance", @@ -262,7 +263,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Deregister an instance from OpsWorks Stacks. The instance can be a registered instance (Amazon EC2 or on-premises) or an instance created with OpsWorks. This action removes the instance from the stack and returns it to your control.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "DeregisterRdsDbInstance":{ "name":"DeregisterRdsDbInstance", @@ -302,7 +303,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the available AWS OpsWorks Stacks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager.

" + "documentation":"

Describes the available OpsWorks Stacks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager.

" }, "DescribeApps":{ "name":"DescribeApps", @@ -358,7 +359,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" + "documentation":"

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" }, "DescribeElasticIps":{ "name":"DescribeElasticIps", @@ -446,7 +447,7 @@ "requestUri":"/" }, "output":{"shape":"DescribeOperatingSystemsResponse"}, - "documentation":"

Describes the operating systems that are supported by AWS OpsWorks Stacks.

" + "documentation":"

Describes the operating systems that are supported by OpsWorks Stacks.

" }, "DescribePermissions":{ "name":"DescribePermissions", @@ -502,7 +503,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes AWS OpsWorks Stacks service errors.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" + "documentation":"

Describes OpsWorks Stacks service errors.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" }, "DescribeStackProvisioningParameters":{ "name":"DescribeStackProvisioningParameters", @@ -708,7 +709,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack.

We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stacks Stack.

Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Registers instances that were created outside of OpsWorks Stacks with a specified stack.

We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an OpsWorks Stacks Stack.

Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "RegisterRdsDbInstance":{ "name":"RegisterRdsDbInstance", @@ -839,7 +840,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide.

" + "documentation":"

Apply cost-allocation tags to a specified stack or layer in OpsWorks Stacks. For more information about how tagging works, see Tags in the OpsWorks User Guide.

" }, "UnassignInstance":{ "name":"UnassignInstance", @@ -852,7 +853,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" + "documentation":"

Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "UnassignVolume":{ "name":"UnassignVolume", @@ -1190,7 +1191,7 @@ }, "IgnoreMetricsTime":{ "shape":"Minute", - "documentation":"

The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. For example, AWS OpsWorks Stacks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct AWS OpsWorks Stacks to suppress scaling events long enough to get the new instances online.

" + "documentation":"

The amount of time (in minutes) after a scaling event occurs that OpsWorks Stacks should ignore metrics and suppress additional scaling events. For example, OpsWorks Stacks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct OpsWorks Stacks to suppress scaling events long enough to get the new instances online.

" }, "CpuThreshold":{ "shape":"Double", @@ -1206,10 +1207,10 @@ }, "Alarms":{ "shape":"Strings", - "documentation":"

Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.

To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks Stacks to Act on Your Behalf.

" + "documentation":"

Custom CloudWatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.

To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing OpsWorks Stacks to Act on Your Behalf.

" } }, - "documentation":"

Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when AWS OpsWorks Stacks starts or stops load-based instances.

" + "documentation":"

Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when OpsWorks Stacks starts or stops load-based instances.

" }, "AutoScalingType":{ "type":"string", @@ -1223,7 +1224,7 @@ "members":{ "DeviceName":{ "shape":"String", - "documentation":"

The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and AWS OpsWorks Stacks will provide the correct device name.

" + "documentation":"

The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and OpsWorks Stacks will provide the correct device name.

" }, "NoDevice":{ "shape":"String", @@ -1275,15 +1276,15 @@ }, "Name":{ "shape":"String", - "documentation":"

The cloned stack name.

" + "documentation":"

The cloned stack name. Stack names can be a maximum of 64 characters.

" }, "Region":{ "shape":"String", - "documentation":"

The cloned stack AWS region, such as \"ap-northeast-2\". For more information about AWS regions, see Regions and Endpoints.

" + "documentation":"

The cloned stack Amazon Web Services Region, such as ap-northeast-2. For more information about Amazon Web Services Regions, see Regions and Endpoints.

" }, "VpcId":{ "shape":"String", - "documentation":"

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms.

" + "documentation":"

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms.

" }, "Attributes":{ "shape":"StackAttributes", @@ -1291,7 +1292,7 @@ }, "ServiceRoleArn":{ "shape":"String", - "documentation":"

The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

" + "documentation":"

The stack Identity and Access Management (IAM) role, which allows OpsWorks Stacks to work with Amazon Web Services resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

" }, "DefaultInstanceProfileArn":{ "shape":"String", @@ -1299,7 +1300,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "HostnameTheme":{ "shape":"String", @@ -1331,7 +1332,7 @@ }, "UseOpsworksSecurityGroups":{ "shape":"Boolean", - "documentation":"

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.

  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" + "documentation":"

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

OpsWorks Stacks provides a standard set of security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.

  • False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" }, "CustomCookbooksSource":{ "shape":"Source", @@ -1339,7 +1340,7 @@ }, "DefaultSshKeyName":{ "shape":"String", - "documentation":"

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" + "documentation":"

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" }, "ClonePermissions":{ "shape":"Boolean", @@ -1355,7 +1356,7 @@ }, "AgentVersion":{ "shape":"String", - "documentation":"

The default AWS OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" + "documentation":"

The default OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks automatically installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" } } }, @@ -1381,7 +1382,7 @@ "documentation":"

A list of configuration options for CloudWatch Logs.

" } }, - "documentation":"

Describes the Amazon CloudWatch logs configuration for a layer.

" + "documentation":"

Describes the Amazon CloudWatch Logs configuration for a layer.

" }, "CloudWatchLogsEncoding":{ "type":"string", @@ -1537,12 +1538,12 @@ "documentation":"

Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. The default value is 32768 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

" } }, - "documentation":"

Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference.

" + "documentation":"

Describes the CloudWatch Logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference.

" }, "CloudWatchLogsLogStreams":{ "type":"list", "member":{"shape":"CloudWatchLogsLogStream"}, - "documentation":"

Describes the Amazon CloudWatch logs configuration for a layer.

" + "documentation":"

Describes the Amazon CloudWatch Logs configuration for a layer.

" }, "CloudWatchLogsTimeZone":{ "type":"string", @@ -1632,7 +1633,7 @@ }, "Type":{ "shape":"AppType", - "documentation":"

The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. AWS OpsWorks Stacks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other.

" + "documentation":"

The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. OpsWorks Stacks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other.

" }, "AppSource":{ "shape":"Source", @@ -1743,11 +1744,11 @@ }, "Hostname":{ "shape":"String", - "documentation":"

The instance host name.

" + "documentation":"

The instance host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters

  • Windows-based instances: 15 characters

" }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information about the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating systems, see Operating SystemsFor more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

" + "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

Not all operating systems are supported with all versions of Chef. For more information about the supported operating systems, see OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about how to use custom AMIs with OpsWorks Stacks, see Using Custom AMIs.

" }, "AmiId":{ "shape":"String", @@ -1767,7 +1768,7 @@ }, "SubnetId":{ "shape":"String", - "documentation":"

The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks Stacks to launch the instance in a different subnet.

" + "documentation":"

The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct OpsWorks Stacks to launch the instance in a different subnet.

" }, "Architecture":{ "shape":"Architecture", @@ -1791,7 +1792,7 @@ }, "AgentVersion":{ "shape":"String", - "documentation":"

The default AWS OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.

  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the instance.

The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

" + "documentation":"

The default OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.

  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. OpsWorks Stacks installs that version on the instance.

The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

" }, "Tenancy":{ "shape":"String", @@ -1828,11 +1829,11 @@ }, "Name":{ "shape":"String", - "documentation":"

The layer name, which is used by the console.

" + "documentation":"

The layer name, which is used by the console. Layer names can be a maximum of 32 characters.

" }, "Shortname":{ "shape":"String", - "documentation":"

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference.

" + "documentation":"

For custom layers only, use this parameter to specify the layer's short name, which is used internally by OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 32 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

Built-in layer short names are defined by OpsWorks Stacks. For more information, see the Layer Reference.

" }, "Attributes":{ "shape":"LayerAttributes", @@ -1848,7 +1849,7 @@ }, "CustomJson":{ "shape":"String", - "documentation":"

A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI.

" + "documentation":"

A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the CLI.

" }, "CustomSecurityGroupIds":{ "shape":"Strings", @@ -1913,15 +1914,15 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The stack name.

" + "documentation":"

The stack name. Stack names can be a maximum of 64 characters.

" }, "Region":{ "shape":"String", - "documentation":"

The stack's AWS region, such as ap-south-1. For more information about Amazon regions, see Regions and Endpoints.

In the AWS CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the AWS CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the AWS CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage AWS, we recommend that you use regional endpoints for new stacks. The AWS CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic AWS OpsWorks Stacks region.

" + "documentation":"

The stack's Amazon Web Services Region, such as ap-south-1. For more information about Amazon Web Services Regions, see Regions and Endpoints.

In the CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage Amazon Web Services, we recommend that you use regional endpoints for new stacks. The CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic OpsWorks Stacks region.

" }, "VpcId":{ "shape":"String", - "documentation":"

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

  • If your account supports EC2-Classic, the default value is no VPC.

  • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms.

" + "documentation":"

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

  • If your account supports EC2-Classic, the default value is no VPC.

  • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms.

" }, "Attributes":{ "shape":"StackAttributes", @@ -1929,7 +1930,7 @@ }, "ServiceRoleArn":{ "shape":"String", - "documentation":"

The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers.

" + "documentation":"

The stack's IAM role, which allows OpsWorks Stacks to work with Amazon Web Services resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers.

" }, "DefaultInstanceProfileArn":{ "shape":"String", @@ -1937,7 +1938,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -1969,7 +1970,7 @@ }, "UseOpsworksSecurityGroups":{ "shape":"Boolean", - "documentation":"

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" + "documentation":"

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

  • True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

  • False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" }, "CustomCookbooksSource":{ "shape":"Source", @@ -1977,7 +1978,7 @@ }, "DefaultSshKeyName":{ "shape":"String", - "documentation":"

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" + "documentation":"

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" }, "DefaultRootDeviceType":{ "shape":"RootDeviceType", @@ -1985,7 +1986,7 @@ }, "AgentVersion":{ "shape":"String", - "documentation":"

The default AWS OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" + "documentation":"

The default OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks installs that version on the stack's instances.

The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" } } }, @@ -2009,7 +2010,7 @@ }, "SshUsername":{ "shape":"String", - "documentation":"

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name.

" + "documentation":"

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example, my.name is changed to myname. If you do not specify an SSH user name, OpsWorks Stacks generates one from the IAM user name.

" }, "SshPublicKey":{ "shape":"String", @@ -2177,11 +2178,11 @@ "members":{ "Name":{ "shape":"DeploymentCommandName", - "documentation":"

Specifies the operation. You can specify only one command.

For stacks, the following commands are available:

  • execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {\"recipes\":[\"phpapp::appsetup\"]}.

  • install_dependencies: Install the stack's dependencies.

  • update_custom_cookbooks: Update the stack's custom cookbooks.

  • update_dependencies: Update the stack's dependencies.

The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

For apps, the following commands are available:

  • deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to migrate the database. The default setting is {\"migrate\":[\"false\"]}.

  • rollback Roll the app back to the previous version. When you update an app, AWS OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions.

  • start: Start the app's web or application server.

  • stop: Stop the app's web or application server.

  • restart: Restart the app's web or application server.

  • undeploy: Undeploy the app.

" + "documentation":"

Specifies the operation. You can specify only one command.

For stacks, the following commands are available:

  • execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {\"recipes\":[\"phpapp::appsetup\"]}.

  • install_dependencies: Install the stack's dependencies.

  • update_custom_cookbooks: Update the stack's custom cookbooks.

  • update_dependencies: Update the stack's dependencies.

The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

For apps, the following commands are available:

  • deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to migrate the database. The default setting is {\"migrate\":[\"false\"]}.

  • rollback Roll the app back to the previous version. When you update an app, OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions.

  • start: Start the app's web or application server.

  • stop: Stop the app's web or application server.

  • restart: Restart the app's web or application server.

  • undeploy: Undeploy the app.

" }, "Args":{ "shape":"DeploymentCommandArgs", - "documentation":"

The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

{\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...], ...}

The update_dependencies command takes two arguments:

  • upgrade_os_to - Specifies the desired Amazon Linux version for instances whose OS you want to upgrade, such as Amazon Linux 2016.09. You must also set the allow_reboot argument to true.

  • allow_reboot - Specifies whether to allow AWS OpsWorks Stacks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false.

For example, to upgrade an instance to Amazon Linux 2016.09, set Args to the following.

{ \"upgrade_os_to\":[\"Amazon Linux 2016.09\"], \"allow_reboot\":[\"true\"] }

" + "documentation":"

The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

{\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...], ...}

The update_dependencies command takes two arguments:

  • upgrade_os_to - Specifies the Amazon Linux version that you want instances to run, such as Amazon Linux 2. You must also set the allow_reboot argument to true.

  • allow_reboot - Specifies whether to allow OpsWorks Stacks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false.

For example, to upgrade an instance to Amazon Linux 2018.03, set Args to the following.

{ \"upgrade_os_to\":[\"Amazon Linux 2018.03\"], \"allow_reboot\":[\"true\"] }

" } }, "documentation":"

Used to specify a stack or deployment command.

" @@ -2258,7 +2259,7 @@ "members":{ "VolumeId":{ "shape":"String", - "documentation":"

The AWS OpsWorks Stacks volume ID, which is the GUID that AWS OpsWorks Stacks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID.

" + "documentation":"

The OpsWorks Stacks volume ID, which is the GUID that OpsWorks Stacks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID.

" } } }, @@ -2653,7 +2654,7 @@ "members":{ "AgentInstallerUrl":{ "shape":"String", - "documentation":"

The AWS OpsWorks Stacks agent installer's URL.

" + "documentation":"

The OpsWorks Stacks agent installer's URL.

" }, "Parameters":{ "shape":"Parameters", @@ -2687,7 +2688,7 @@ "members":{ "StackIds":{ "shape":"Strings", - "documentation":"

An array of stack IDs that specify the stacks to be described. If you omit this parameter, DescribeStacks returns a description of every stack.

" + "documentation":"

An array of stack IDs that specify the stacks to be described. If you omit this parameter, and have permissions to get information about all stacks, DescribeStacks returns a description of every stack. If the IAM policy that is attached to an IAM user limits the DescribeStacks action to specific stack ARNs, this parameter is required, and the user must specify a stack ARN that is allowed by the policy. Otherwise, DescribeStacks returns an AccessDenied error.

" } } }, @@ -2819,7 +2820,7 @@ }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD,and standard for Magnetic volumes.

If you specify the io1 volume type, you must also specify a value for the Iops attribute. The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. AWS uses the default volume size (in GiB) specified in the AMI attributes to set IOPS to 50 x (volume size).

" + "documentation":"

The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD, and standard for Magnetic volumes.

If you specify the io1 volume type, you must also specify a value for the Iops attribute. The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. Amazon Web Services uses the default volume size (in GiB) specified in the AMI attributes to set IOPS to 50 x (volume size).

" }, "DeleteOnTermination":{ "shape":"Boolean", @@ -2863,7 +2864,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The name.

" + "documentation":"

The name, which can be a maximum of 32 characters.

" }, "Domain":{ "shape":"String", @@ -2871,7 +2872,7 @@ }, "Region":{ "shape":"String", - "documentation":"

The AWS region. For more information, see Regions and Endpoints.

" + "documentation":"

The Amazon Web Services Region. For more information, see Regions and Endpoints.

" }, "InstanceId":{ "shape":"String", @@ -2889,11 +2890,11 @@ "members":{ "ElasticLoadBalancerName":{ "shape":"String", - "documentation":"

The Elastic Load Balancing instance's name.

" + "documentation":"

The Elastic Load Balancing instance name.

" }, "Region":{ "shape":"String", - "documentation":"

The instance's AWS region.

" + "documentation":"

The instance's Amazon Web Services Region.

" }, "DnsName":{ "shape":"String", @@ -2901,11 +2902,11 @@ }, "StackId":{ "shape":"String", - "documentation":"

The ID of the stack that the instance is associated with.

" + "documentation":"

The ID of the stack with which the instance is associated.

" }, "LayerId":{ "shape":"String", - "documentation":"

The ID of the layer that the instance is attached to.

" + "documentation":"

The ID of the layer to which the instance is attached.

" }, "VpcId":{ "shape":"String", @@ -2921,7 +2922,7 @@ }, "Ec2InstanceIds":{ "shape":"Strings", - "documentation":"

A list of the EC2 instances that the Elastic Load Balancing instance is managing traffic for.

" + "documentation":"

A list of the EC2 instances for which the Elastic Load Balancing instance is managing traffic.

" } }, "documentation":"

Describes an Elastic Load Balancing instance.

" @@ -2947,7 +2948,7 @@ }, "Secure":{ "shape":"Boolean", - "documentation":"

(Optional) Whether the variable's value will be returned by the DescribeApps action. To conceal an environment variable's value, set Secure to true. DescribeApps then returns *****FILTERED***** instead of the actual value. The default value for Secure is false.

" + "documentation":"

(Optional) Whether the variable's value is returned by the DescribeApps action. To hide an environment variable's value, set Secure to true. DescribeApps returns *****FILTERED***** instead of the actual value. The default value for Secure is false.

" } }, "documentation":"

Represents an app's environment variable.

" @@ -2986,11 +2987,11 @@ "members":{ "InstanceId":{ "shape":"String", - "documentation":"

The instance's AWS OpsWorks Stacks ID.

" + "documentation":"

The instance's OpsWorks Stacks ID.

" }, "ValidForInMinutes":{ "shape":"ValidForInMinutes", - "documentation":"

The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, he or she automatically will be logged out.

" + "documentation":"

The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, they are logged out.

" } } }, @@ -3058,11 +3059,11 @@ }, "ElasticIp":{ "shape":"String", - "documentation":"

The instance Elastic IP address .

" + "documentation":"

The instance Elastic IP address.

" }, "Hostname":{ "shape":"String", - "documentation":"

The instance host name.

" + "documentation":"

The instance host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters

  • Windows-based instances: 15 characters

" }, "InfrastructureClass":{ "shape":"String", @@ -3070,7 +3071,7 @@ }, "InstallUpdatesOnBoot":{ "shape":"Boolean", - "documentation":"

Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

" + "documentation":"

Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must update instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

We strongly recommend using the default value of true to ensure that your instances have the latest security updates.

" }, "InstanceId":{ "shape":"String", @@ -3122,7 +3123,7 @@ }, "ReportedAgentVersion":{ "shape":"String", - "documentation":"

The instance's reported AWS OpsWorks Stacks agent version.

" + "documentation":"

The instance's reported OpsWorks Stacks agent version.

" }, "ReportedOs":{ "shape":"ReportedOs", @@ -3304,7 +3305,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The layer name.

" + "documentation":"

The layer name. Layer names can be a maximum of 32 characters.

" }, "Shortname":{ "shape":"String", @@ -3312,7 +3313,7 @@ }, "Attributes":{ "shape":"LayerAttributes", - "documentation":"

The layer attributes.

For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value

For an ECS Cluster layer, AWS OpsWorks Stacks the EcsClusterArn attribute is set to the cluster's ARN.

" + "documentation":"

The layer attributes.

For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

For an ECS Cluster layer, OpsWorks Stacks sets the EcsClusterArn attribute to the cluster's ARN.

" }, "CloudWatchLogsConfiguration":{ "shape":"CloudWatchLogsConfiguration", @@ -3356,7 +3357,7 @@ }, "DefaultRecipes":{ "shape":"Recipes", - "documentation":"

AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. You can also provide custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

" + "documentation":"

OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, OpsWorks Stacks runs a set of standard recipes for each event. You can also provide custom recipes for any or all layers and events. OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

" }, "CustomRecipes":{ "shape":"Recipes", @@ -3492,11 +3493,11 @@ }, "UpScaling":{ "shape":"AutoScalingThresholds", - "documentation":"

An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when AWS OpsWorks Stacks increases the number of instances.

" + "documentation":"

An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when OpsWorks Stacks increases the number of instances.

" }, "DownScaling":{ "shape":"AutoScalingThresholds", - "documentation":"

An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when AWS OpsWorks Stacks reduces the number of instances.

" + "documentation":"

An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when OpsWorks Stacks reduces the number of instances.

" } }, "documentation":"

Describes a layer's load-based auto scaling configuration.

" @@ -3518,11 +3519,11 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The name of the operating system, such as Amazon Linux 2018.03.

" + "documentation":"

The name of the operating system, such as Amazon Linux 2.

" }, "Id":{ "shape":"String", - "documentation":"

The ID of a supported operating system, such as Amazon Linux 2018.03.

" + "documentation":"

The ID of a supported operating system, such as Amazon Linux 2.

" }, "Type":{ "shape":"String", @@ -3530,7 +3531,7 @@ }, "ConfigurationManagers":{ "shape":"OperatingSystemConfigurationManagers", - "documentation":"

Supported configuration manager name and versions for an AWS OpsWorks Stacks operating system.

" + "documentation":"

Supported configuration manager name and versions for an OpsWorks Stacks operating system.

" }, "ReportedName":{ "shape":"String", @@ -3545,7 +3546,7 @@ "documentation":"

Indicates that an operating system is not supported for new instances.

" } }, - "documentation":"

Describes supported operating systems in AWS OpsWorks Stacks.

" + "documentation":"

Describes supported operating systems in OpsWorks Stacks.

" }, "OperatingSystemConfigurationManager":{ "type":"structure", @@ -3583,7 +3584,7 @@ }, "IamUserArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers.

" + "documentation":"

The Amazon Resource Name (ARN) for an Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers.

" }, "AllowSsh":{ "shape":"Boolean", @@ -3675,7 +3676,7 @@ }, "DbInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier.

" + "documentation":"

The database instance identifier.

" }, "DbUser":{ "shape":"String", @@ -3683,11 +3684,11 @@ }, "DbPassword":{ "shape":"String", - "documentation":"

AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "documentation":"

OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" }, "Region":{ "shape":"String", - "documentation":"

The instance's AWS region.

" + "documentation":"

The instance's Amazon Web Services Region.

" }, "Address":{ "shape":"String", @@ -3703,7 +3704,7 @@ }, "MissingOnRds":{ "shape":"Boolean", - "documentation":"

Set to true if AWS OpsWorks Stacks is unable to discover the Amazon RDS instance. AWS OpsWorks Stacks attempts to discover the instance only once. If this value is set to true, you must deregister the instance, and then register it again.

" + "documentation":"

Set to true if OpsWorks Stacks is unable to discover the Amazon RDS instance. OpsWorks Stacks attempts to discover the instance only once. If this value is set to true, you must deregister the instance, and then register it again.

" } }, "documentation":"

Describes an Amazon RDS instance.

" @@ -3746,7 +3747,7 @@ "documentation":"

An array of custom recipe names to be run following a shutdown event.

" } }, - "documentation":"

AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. In addition, you can provide custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

" + "documentation":"

OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, OpsWorks Stacks runs a set of standard recipes for each event. In addition, you can provide custom recipes for any or all layers and events. OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

" }, "RegisterEcsClusterRequest":{ "type":"structure", @@ -3812,7 +3813,7 @@ }, "Hostname":{ "shape":"String", - "documentation":"

The instance's hostname.

" + "documentation":"

The instance's host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters

  • Windows-based instances: 15 characters

" }, "PublicIp":{ "shape":"String", @@ -3841,7 +3842,7 @@ "members":{ "InstanceId":{ "shape":"String", - "documentation":"

The registered instance's AWS OpsWorks Stacks ID.

" + "documentation":"

The registered instance's OpsWorks Stacks ID.

" } }, "documentation":"

Contains the response to a RegisterInstanceResult request.

" @@ -3984,7 +3985,7 @@ "documentation":"

When the error occurred.

" } }, - "documentation":"

Describes an AWS OpsWorks Stacks service error.

" + "documentation":"

Describes an OpsWorks Stacks service error.

" }, "ServiceErrors":{ "type":"list", @@ -4004,11 +4005,11 @@ }, "UpScaling":{ "shape":"AutoScalingThresholds", - "documentation":"

An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, AWS OpsWorks Stacks starts a specified number of instances.

" + "documentation":"

An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, OpsWorks Stacks starts a specified number of instances.

" }, "DownScaling":{ "shape":"AutoScalingThresholds", - "documentation":"

An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, AWS OpsWorks Stacks stops a specified number of instances.

" + "documentation":"

An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, OpsWorks Stacks stops a specified number of instances.

" } } }, @@ -4060,7 +4061,7 @@ "members":{ "ExecutionTimeout":{ "shape":"Integer", - "documentation":"

The time, in seconds, that AWS OpsWorks Stacks will wait after triggering a Shutdown event before shutting down an instance.

" + "documentation":"

The time, in seconds, that OpsWorks Stacks waits after triggering a Shutdown event before shutting down an instance.

" }, "DelayUntilElbConnectionsDrained":{ "shape":"Boolean", @@ -4086,15 +4087,15 @@ }, "Password":{ "shape":"String", - "documentation":"

When included in a request, the parameter depends on the repository type.

  • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.

  • For HTTP bundles and Subversion repositories, set Password to the password.

For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.

In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "documentation":"

When included in a request, the parameter depends on the repository type.

  • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.

  • For HTTP bundles and Subversion repositories, set Password to the password.

For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.

In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" }, "SshKey":{ "shape":"String", - "documentation":"

In requests, the repository's SSH key.

In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "documentation":"

In requests, the repository's SSH key.

In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" }, "Revision":{ "shape":"String", - "documentation":"

The application's version. AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.

" + "documentation":"

The application's version. OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.

" } }, "documentation":"

Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks.

" @@ -4139,7 +4140,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The stack name.

" + "documentation":"

The stack name. Stack names can be a maximum of 64 characters.

" }, "Arn":{ "shape":"String", @@ -4147,7 +4148,7 @@ }, "Region":{ "shape":"String", - "documentation":"

The stack AWS region, such as \"ap-northeast-2\". For more information about AWS regions, see Regions and Endpoints.

" + "documentation":"

The stack Amazon Web Services Region, such as ap-northeast-2. For more information about Amazon Web Services Regions, see Regions and Endpoints.

" }, "VpcId":{ "shape":"String", @@ -4159,7 +4160,7 @@ }, "ServiceRoleArn":{ "shape":"String", - "documentation":"

The stack AWS Identity and Access Management (IAM) role.

" + "documentation":"

The stack Identity and Access Management (IAM) role.

" }, "DefaultInstanceProfileArn":{ "shape":"String", @@ -4199,7 +4200,7 @@ }, "UseOpsworksSecurityGroups":{ "shape":"Boolean", - "documentation":"

Whether the stack automatically associates the AWS OpsWorks Stacks built-in security groups with the stack's layers.

" + "documentation":"

Whether the stack automatically associates the OpsWorks Stacks built-in security groups with the stack's layers.

" }, "CustomCookbooksSource":{ "shape":"Source", @@ -4215,7 +4216,7 @@ }, "DefaultRootDeviceType":{ "shape":"RootDeviceType", - "documentation":"

The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

" + "documentation":"

The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

" }, "AgentVersion":{ "shape":"String", @@ -4238,11 +4239,11 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The name. This parameter must be set to \"Chef\".

" + "documentation":"

The name. This parameter must be set to Chef.

" }, "Version":{ "shape":"String", - "documentation":"

The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4.

" + "documentation":"

The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 12.

" } }, "documentation":"

Describes the configuration manager.

" @@ -4311,7 +4312,7 @@ }, "Force":{ "shape":"Boolean", - "documentation":"

Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call disassociates the AWS OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the AWS OpsWorks Stacks instance with a new one.

" + "documentation":"

Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call disassociates the OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the OpsWorks Stacks instance with a new one.

" } } }, @@ -4372,11 +4373,11 @@ }, "ValidForInMinutes":{ "shape":"Integer", - "documentation":"

The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they will be automatically logged out.

" + "documentation":"

The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they are automatically logged out.

" }, "InstanceId":{ "shape":"String", - "documentation":"

The instance's AWS OpsWorks Stacks ID.

" + "documentation":"

The instance's OpsWorks Stacks ID.

" } }, "documentation":"

Contains the data needed by RDP clients such as the Microsoft Remote Desktop Connection to log in to the instance.

" @@ -4496,7 +4497,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The new name.

" + "documentation":"

The new name, which can be a maximum of 32 characters.

" } } }, @@ -4522,11 +4523,11 @@ }, "Hostname":{ "shape":"String", - "documentation":"

The instance host name.

" + "documentation":"

The instance host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters

  • Windows-based instances: 15 characters

" }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "AmiId":{ "shape":"String", @@ -4550,7 +4551,7 @@ }, "AgentVersion":{ "shape":"String", - "documentation":"

The default AWS OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.

  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the instance.

The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

AgentVersion cannot be set to Chef 12.2.

" + "documentation":"

The default OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.

  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. OpsWorks Stacks installs that version on the instance.

The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

AgentVersion cannot be set to Chef 12.2.

" } } }, @@ -4564,11 +4565,11 @@ }, "Name":{ "shape":"String", - "documentation":"

The layer name, which is used by the console.

" + "documentation":"

The layer name, which is used by the console. Layer names can be a maximum of 32 characters.

" }, "Shortname":{ "shape":"String", - "documentation":"

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\\A[a-z0-9\\-\\_\\.]+\\Z/.

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference

" + "documentation":"

For custom layers only, use this parameter to specify the layer's short name, which is used internally by OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 32 characters and must be in the following format: /\\A[a-z0-9\\-\\_\\.]+\\Z/.

Built-in layer short names are defined by OpsWorks Stacks. For more information, see the Layer reference in the OpsWorks User Guide.

" }, "Attributes":{ "shape":"LayerAttributes", @@ -4665,7 +4666,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The stack's new name.

" + "documentation":"

The stack's new name. Stack names can be a maximum of 64 characters.

" }, "Attributes":{ "shape":"StackAttributes", @@ -4681,7 +4682,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -4717,7 +4718,7 @@ }, "DefaultSshKeyName":{ "shape":"String", - "documentation":"

A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" + "documentation":"

A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

" }, "DefaultRootDeviceType":{ "shape":"RootDeviceType", @@ -4725,11 +4726,11 @@ }, "UseOpsworksSecurityGroups":{ "shape":"Boolean", - "documentation":"

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" + "documentation":"

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:

  • True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

  • False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation. Custom security groups are required only for those layers that need custom settings.

For more information, see Create a New Stack.

" }, "AgentVersion":{ "shape":"String", - "documentation":"

The default AWS OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" + "documentation":"

The default OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.

  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

" } } }, @@ -4743,7 +4744,7 @@ }, "SshUsername":{ "shape":"String", - "documentation":"

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name.

" + "documentation":"

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, OpsWorks Stacks generates one from the IAM user name.

" }, "SshPublicKey":{ "shape":"String", @@ -4765,7 +4766,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The new name.

" + "documentation":"

The new name. Volume names can be a maximum of 128 characters.

" }, "MountPoint":{ "shape":"String", @@ -4840,7 +4841,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The volume name.

" + "documentation":"

The volume name. Volume names are a maximum of 128 characters.

" }, "RaidArrayId":{ "shape":"String", @@ -4868,7 +4869,7 @@ }, "Region":{ "shape":"String", - "documentation":"

The AWS region. For more information about AWS regions, see Regions and Endpoints.

" + "documentation":"

The Amazon Web Services Region. For more information about Amazon Web Services Regions, see Regions and Endpoints.

" }, "AvailabilityZone":{ "shape":"String", @@ -4876,7 +4877,7 @@ }, "VolumeType":{ "shape":"String", - "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.

" }, "Iops":{ "shape":"Integer", @@ -4915,7 +4916,7 @@ }, "VolumeType":{ "shape":"String", - "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.

" }, "Iops":{ "shape":"Integer", @@ -4979,5 +4980,5 @@ "documentation":"

Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

  • The key is the time period (a UTC hour) and must be an integer from 0 - 23.

  • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\".

The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

The following example specifies that the instance should be online for four hours, from UTC 1200 - 1600. It will be off for the remainder of the day.

{ \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" }

" } }, - "documentation":"AWS OpsWorks

Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes.

AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.

SDKs and CLI

The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:

Endpoints

AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created.

  • opsworks.us-east-1.amazonaws.com

  • opsworks.us-east-2.amazonaws.com

  • opsworks.us-west-1.amazonaws.com

  • opsworks.us-west-2.amazonaws.com

  • opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console)

  • opsworks.eu-west-1.amazonaws.com

  • opsworks.eu-west-2.amazonaws.com

  • opsworks.eu-west-3.amazonaws.com

  • opsworks.eu-central-1.amazonaws.com

  • opsworks.ap-northeast-1.amazonaws.com

  • opsworks.ap-northeast-2.amazonaws.com

  • opsworks.ap-south-1.amazonaws.com

  • opsworks.ap-southeast-1.amazonaws.com

  • opsworks.ap-southeast-2.amazonaws.com

  • opsworks.sa-east-1.amazonaws.com

Chef Versions

When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.

" + "documentation":"OpsWorks

Welcome to the OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for OpsWorks Stacks actions and data types, including common parameters and error codes.

OpsWorks Stacks is an application management service that provides an integrated experience for managing the complete application lifecycle. For information about OpsWorks, see the OpsWorks information page.

SDKs and CLI

Use the OpsWorks Stacks API by using the Command Line Interface (CLI) or by using one of the Amazon Web Services SDKs to implement applications in your preferred language. For more information, see:

Endpoints

OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created.

  • opsworks.us-east-1.amazonaws.com

  • opsworks.us-east-2.amazonaws.com

  • opsworks.us-west-1.amazonaws.com

  • opsworks.us-west-2.amazonaws.com

  • opsworks.ca-central-1.amazonaws.com (API only; not available in the Amazon Web Services Management Console)

  • opsworks.eu-west-1.amazonaws.com

  • opsworks.eu-west-2.amazonaws.com

  • opsworks.eu-west-3.amazonaws.com

  • opsworks.eu-central-1.amazonaws.com

  • opsworks.ap-northeast-1.amazonaws.com

  • opsworks.ap-northeast-2.amazonaws.com

  • opsworks.ap-south-1.amazonaws.com

  • opsworks.ap-southeast-1.amazonaws.com

  • opsworks.ap-southeast-2.amazonaws.com

  • opsworks.sa-east-1.amazonaws.com

Chef Versions

When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.

" } diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index 20f4a85aa9..c51aeb4466 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"organizations", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Organizations", "serviceFullName":"AWS Organizations", "serviceId":"Organizations", "signatureVersion":"v4", "targetPrefix":"AWSOrganizationsV20161128", - "uid":"organizations-2016-11-28" + "uid":"organizations-2016-11-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptHandshake":{ @@ -121,7 +123,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

  • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

This operation can be called only from the organization's management account.

For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

  • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.

" + "documentation":"

Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

  • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

This operation can be called only from the organization's management account.

For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

  • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.

" }, "CreateGovCloudAccount":{ "name":"CreateGovCloudAccount", @@ -1378,7 +1380,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

  • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

  • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

  • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

  • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​

  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

  • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

  • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

  • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

  • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

  • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​

  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

  • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1417,7 +1419,8 @@ "CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED", "SERVICE_ACCESS_NOT_ENABLED", "INVALID_PAYMENT_INSTRUMENT", - "ACCOUNT_CREATION_NOT_COMPLETE" + "ACCOUNT_CREATION_NOT_COMPLETE", + "CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR" ] }, "CreateAccountFailureReason":{ diff --git a/botocore/data/osis/2022-01-01/service-2.json b/botocore/data/osis/2022-01-01/service-2.json index c9f06d9caa..aa6244273d 100644 --- a/botocore/data/osis/2022-01-01/service-2.json +++ b/botocore/data/osis/2022-01-01/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2022-01-01", "endpointPrefix":"osis", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon OpenSearch Ingestion", "serviceId":"OSIS", "signatureVersion":"v4", - "uid":"osis-2022-01-01" + "uid":"osis-2022-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreatePipeline":{ @@ -19,6 +21,7 @@ "input":{"shape":"CreatePipelineRequest"}, "output":{"shape":"CreatePipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"LimitExceededException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, @@ -37,6 +40,7 @@ "input":{"shape":"DeletePipelineRequest"}, "output":{"shape":"DeletePipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -54,6 +58,7 @@ "input":{"shape":"GetPipelineRequest"}, "output":{"shape":"GetPipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -70,6 +75,7 @@ "input":{"shape":"GetPipelineBlueprintRequest"}, "output":{"shape":"GetPipelineBlueprintResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalException"}, {"shape":"ValidationException"}, @@ -86,6 +92,7 @@ 
"input":{"shape":"GetPipelineChangeProgressRequest"}, "output":{"shape":"GetPipelineChangeProgressResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -102,6 +109,7 @@ "input":{"shape":"ListPipelineBlueprintsRequest"}, "output":{"shape":"ListPipelineBlueprintsResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -118,6 +126,7 @@ "input":{"shape":"ListPipelinesRequest"}, "output":{"shape":"ListPipelinesResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -134,6 +143,7 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"}, @@ -150,6 +160,7 @@ "input":{"shape":"StartPipelineRequest"}, "output":{"shape":"StartPipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"AccessDeniedException"}, {"shape":"ConflictException"}, {"shape":"InternalException"}, @@ -167,6 +178,7 @@ "input":{"shape":"StopPipelineRequest"}, "output":{"shape":"StopPipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"AccessDeniedException"}, {"shape":"ConflictException"}, {"shape":"InternalException"}, @@ -184,6 +196,7 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"LimitExceededException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, @@ -201,6 +214,7 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, 
{"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"}, @@ -217,6 +231,7 @@ "input":{"shape":"UpdatePipelineRequest"}, "output":{"shape":"UpdatePipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"ValidationException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, @@ -234,6 +249,7 @@ "input":{"shape":"ValidatePipelineRequest"}, "output":{"shape":"ValidatePipelineResponse"}, "errors":[ + {"shape":"DisabledOperationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalException"}, {"shape":"ValidationException"} @@ -250,6 +266,10 @@ "error":{"httpStatusCode":403}, "exception":true }, + "BlueprintFormat":{ + "type":"string", + "pattern":"(YAML|JSON)" + }, "Boolean":{"type":"boolean"}, "BufferOptions":{ "type":"structure", @@ -260,7 +280,7 @@ "documentation":"

Whether persistent buffering should be enabled.

" } }, - "documentation":"

Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions.

" + "documentation":"

Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions. For more information, see Persistent buffering.

" }, "ChangeProgressStage":{ "type":"structure", @@ -332,13 +352,17 @@ "FAILED" ] }, + "CidrBlock":{ + "type":"string", + "pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(3[0-2]|[12]?[0-9])$" + }, "CloudWatchLogDestination":{ "type":"structure", "required":["LogGroup"], "members":{ "LogGroup":{ "shape":"LogGroup", - "documentation":"

The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline.

" + "documentation":"

The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/vendedlogs/OpenSearchService/pipelines.

" } }, "documentation":"

The destination for OpenSearch Ingestion logs sent to Amazon CloudWatch.

" @@ -424,16 +448,24 @@ "members":{ } }, + "DisabledOperationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

Exception is thrown when an operation has been disabled.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "EncryptionAtRestOptions":{ "type":"structure", "required":["KmsKeyArn"], "members":{ "KmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key.

" + "documentation":"

The ARN of the KMS key used to encrypt buffer data. By default, data is encrypted using an Amazon Web Services owned key.

" } }, - "documentation":"

Options to control how OpenSearch encrypts all data-at-rest.

" + "documentation":"

Options to control how OpenSearch encrypts buffer data.

" }, "GetPipelineBlueprintRequest":{ "type":"structure", @@ -444,6 +476,12 @@ "documentation":"

The name of the blueprint to retrieve.

", "location":"uri", "locationName":"BlueprintName" + }, + "Format":{ + "shape":"BlueprintFormat", + "documentation":"

The format of the blueprint to retrieve.

", + "location":"querystring", + "locationName":"format" } } }, @@ -453,6 +491,10 @@ "Blueprint":{ "shape":"PipelineBlueprint", "documentation":"

The requested blueprint in YAML format.

" + }, + "Format":{ + "shape":"String", + "documentation":"

The format of the blueprint.

" } } }, @@ -483,7 +525,7 @@ "members":{ "PipelineName":{ "shape":"PipelineName", - "documentation":"

The name of the pipeline to get information about.

", + "documentation":"

The name of the pipeline.

", "location":"uri", "locationName":"PipelineName" } @@ -681,9 +723,17 @@ }, "BufferOptions":{"shape":"BufferOptions"}, "EncryptionAtRestOptions":{"shape":"EncryptionAtRestOptions"}, + "VpcEndpointService":{ + "shape":"String", + "documentation":"

The VPC endpoint service name for the pipeline.

" + }, "ServiceVpcEndpoints":{ "shape":"ServiceVpcEndpointsList", - "documentation":"

A list of VPC endpoints that OpenSearch Ingestion has created to other AWS services.

" + "documentation":"

A list of VPC endpoints that OpenSearch Ingestion has created to other Amazon Web Services services.

" + }, + "Destinations":{ + "shape":"PipelineDestinationList", + "documentation":"

Destinations to which the pipeline writes data.

" }, "Tags":{ "shape":"TagList", @@ -708,6 +758,22 @@ "PipelineConfigurationBody":{ "shape":"String", "documentation":"

The YAML configuration of the blueprint.

" + }, + "DisplayName":{ + "shape":"String", + "documentation":"

The display name of the blueprint.

" + }, + "DisplayDescription":{ + "shape":"String", + "documentation":"

A description of the blueprint.

" + }, + "Service":{ + "shape":"String", + "documentation":"

The name of the service that the blueprint is associated with.

" + }, + "UseCase":{ + "shape":"String", + "documentation":"

The use case that the blueprint relates to.

" } }, "documentation":"

Container for information about an OpenSearch Ingestion blueprint.

" @@ -718,6 +784,22 @@ "BlueprintName":{ "shape":"String", "documentation":"

The name of the blueprint.

" + }, + "DisplayName":{ + "shape":"String", + "documentation":"

The display name of the blueprint.

" + }, + "DisplayDescription":{ + "shape":"String", + "documentation":"

A description of the blueprint.

" + }, + "Service":{ + "shape":"String", + "documentation":"

The name of the service that the blueprint is associated with.

" + }, + "UseCase":{ + "shape":"String", + "documentation":"

The use case that the blueprint relates to.

" } }, "documentation":"

A summary of an OpenSearch Ingestion blueprint.

" @@ -731,6 +813,24 @@ "max":24000, "min":1 }, + "PipelineDestination":{ + "type":"structure", + "members":{ + "ServiceName":{ + "shape":"String", + "documentation":"

The name of the service receiving data from the pipeline.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The endpoint receiving data from the pipeline.

" + } + }, + "documentation":"

An object representing the destination of a pipeline.

" + }, + "PipelineDestinationList":{ + "type":"list", + "member":{"shape":"PipelineDestination"} + }, "PipelineName":{ "type":"string", "max":28, @@ -794,6 +894,10 @@ "shape":"Timestamp", "documentation":"

The date and time when the pipeline was last updated.

" }, + "Destinations":{ + "shape":"PipelineDestinationList", + "documentation":"

A list of destinations to which the pipeline writes data.

" + }, "Tags":{ "shape":"TagList", "documentation":"

A list of tags associated with the given pipeline.

" @@ -846,7 +950,7 @@ }, "VpcEndpointId":{ "shape":"String", - "documentation":"

The ID of the VPC endpoint that was created.

" + "documentation":"

The unique identifier of the VPC endpoint that was created.

" } }, "documentation":"

A container for information about VPC endpoints that were created to other services

" @@ -1081,6 +1185,21 @@ "type":"list", "member":{"shape":"ValidationMessage"} }, + "VpcAttachmentOptions":{ + "type":"structure", + "required":["AttachToVpc"], + "members":{ + "AttachToVpc":{ + "shape":"Boolean", + "documentation":"

Whether a VPC is attached to the pipeline.

" + }, + "CidrBlock":{ + "shape":"CidrBlock", + "documentation":"

The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs).

" + } + }, + "documentation":"

Options for attaching a VPC to a pipeline.

" + }, "VpcEndpoint":{ "type":"structure", "members":{ @@ -1099,6 +1218,13 @@ }, "documentation":"

An OpenSearch Ingestion-managed VPC endpoint that will access one or more pipelines.

" }, + "VpcEndpointManagement":{ + "type":"string", + "enum":[ + "CUSTOMER", + "SERVICE" + ] + }, "VpcEndpointServiceName":{ "type":"string", "enum":["OPENSEARCH_SERVERLESS"] @@ -1118,6 +1244,14 @@ "SecurityGroupIds":{ "shape":"SecurityGroupIds", "documentation":"

A list of security groups associated with the VPC endpoint.

" + }, + "VpcAttachmentOptions":{ + "shape":"VpcAttachmentOptions", + "documentation":"

Options for attaching a VPC to a pipeline.

" + }, + "VpcEndpointManagement":{ + "shape":"VpcEndpointManagement", + "documentation":"

Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline.

" } }, "documentation":"

Options that specify the subnets and security groups for an OpenSearch Ingestion VPC endpoint.

" diff --git a/botocore/data/outposts/2019-12-03/service-2.json b/botocore/data/outposts/2019-12-03/service-2.json index 0e8874b4f1..185ac60df2 100644 --- a/botocore/data/outposts/2019-12-03/service-2.json +++ b/botocore/data/outposts/2019-12-03/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"outposts", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Outposts", "serviceFullName":"AWS Outposts", "serviceId":"Outposts", "signatureVersion":"v4", "signingName":"outposts", - "uid":"outposts-2019-12-03" + "uid":"outposts-2019-12-03", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelCapacityTask":{ @@ -1529,7 +1531,11 @@ "InstanceTypeItem":{ "type":"structure", "members":{ - "InstanceType":{"shape":"InstanceType"} + "InstanceType":{"shape":"InstanceType"}, + "VCPUs":{ + "shape":"VCPUCount", + "documentation":"

The number of default VCPUs in an instance type.

" + } }, "documentation":"

Information about an instance type.

" }, @@ -2822,6 +2828,10 @@ "UPLINK_100G" ] }, + "VCPUCount":{ + "type":"integer", + "box":true + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/botocore/data/partitions.json b/botocore/data/partitions.json index f376f6908a..7a28569c3d 100644 --- a/botocore/data/partitions.json +++ b/botocore/data/partitions.json @@ -198,7 +198,11 @@ "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } }, { "id" : "aws-iso-f", "outputs" : { diff --git a/botocore/data/payment-cryptography-data/2022-02-03/service-2.json b/botocore/data/payment-cryptography-data/2022-02-03/service-2.json index 01666625e5..38b9b967e5 100644 --- a/botocore/data/payment-cryptography-data/2022-02-03/service-2.json +++ b/botocore/data/payment-cryptography-data/2022-02-03/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-02-03", + "auth":["aws.auth#sigv4"], "endpointPrefix":"dataplane.payment-cryptography", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Payment Cryptography Data Plane", "serviceId":"Payment Cryptography Data", "signatureVersion":"v4", @@ -118,7 +119,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys.

You can either generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services Payment Cryptography.

For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. To encrypt using DUKPT, a DUKPT key must already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys.

You can either generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with this operation must be in a compatible key state with KeyModesOfUse set to Encrypt.

For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "TranslatePinData":{ "name":"TranslatePinData", @@ -233,7 +234,7 @@ "required":["CardExpiryDate"], "members":{ "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" } }, @@ -247,16 +248,23 @@ ], "members":{ "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" }, "ServiceCode":{ - "shape":"NumberLengthEquals3", + "shape":"ServiceCodeType", "documentation":"

The service code of the AMEX payment card. This is different from the Card Security Code (CSC).

" } }, "documentation":"

Card data parameters that are required to generate a Card Security Code (CSC2) for an AMEX payment card.

" }, + "ApplicationCryptogramType":{ + "type":"string", + "max":16, + "min":16, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, "AsymmetricEncryptionAttributes":{ "type":"structure", "members":{ @@ -267,6 +275,27 @@ }, "documentation":"

Parameters for plaintext encryption using asymmetric keys.

" }, + "AuthRequestCryptogramType":{ + "type":"string", + "max":16, + "min":16, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, + "AuthResponseValueType":{ + "type":"string", + "max":16, + "min":1, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, + "CardExpiryDateType":{ + "type":"string", + "max":4, + "min":4, + "pattern":"[0-9]+", + "sensitive":true + }, "CardGenerationAttributes":{ "type":"structure", "members":{ @@ -275,10 +304,6 @@ "shape":"AmexCardSecurityCodeVersion2", "documentation":"

Card data parameters that are required to generate a Card Security Code (CSC2) for an AMEX payment card.

" }, - "CardHolderVerificationValue":{ - "shape":"CardHolderVerificationValue", - "documentation":"

Card data parameters that are required to generate a cardholder verification value for the payment card.

" - }, "CardVerificationValue1":{ "shape":"CardVerificationValue1", "documentation":"

Card data parameters that are required to generate Card Verification Value (CVV) for the payment card.

" @@ -287,6 +312,10 @@ "shape":"CardVerificationValue2", "documentation":"

Card data parameters that are required to generate Card Verification Value (CVV2) for the payment card.

" }, + "CardHolderVerificationValue":{ + "shape":"CardHolderVerificationValue", + "documentation":"

Card data parameters that are required to generate a cardholder verification value for the payment card.

" + }, "DynamicCardVerificationCode":{ "shape":"DynamicCardVerificationCode", "documentation":"

Card data parameters that are required to generate CDynamic Card Verification Code (dCVC) for the payment card.

" @@ -302,22 +331,22 @@ "CardHolderVerificationValue":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", + "UnpredictableNumber", "PanSequenceNumber", - "UnpredictableNumber" + "ApplicationTransactionCounter" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter value that comes from a point of sale terminal.

" + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

A random number generated by the issuer.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, - "UnpredictableNumber":{ - "shape":"HexLengthBetween2And8", - "documentation":"

A random number generated by the issuer.

" + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter value that comes from a point of sale terminal.

" } }, "documentation":"

Card data parameters that are required to generate a cardholder verification value for the payment card.

" @@ -330,10 +359,6 @@ "shape":"AmexCardSecurityCodeVersion2", "documentation":"

Card data parameters that are required to verify a Card Security Code (CSC2) for an AMEX payment card.

" }, - "CardHolderVerificationValue":{ - "shape":"CardHolderVerificationValue", - "documentation":"

Card data parameters that are required to verify a cardholder verification value for the payment card.

" - }, "CardVerificationValue1":{ "shape":"CardVerificationValue1", "documentation":"

Card data parameters that are required to verify Card Verification Value (CVV) for the payment card.

" @@ -342,9 +367,9 @@ "shape":"CardVerificationValue2", "documentation":"

Card data parameters that are required to verify Card Verification Value (CVV2) for the payment card.

" }, - "DiscoverDynamicCardVerificationCode":{ - "shape":"DiscoverDynamicCardVerificationCode", - "documentation":"

Card data parameters that are required to verify CDynamic Card Verification Code (dCVC) for the payment card.

" + "CardHolderVerificationValue":{ + "shape":"CardHolderVerificationValue", + "documentation":"

Card data parameters that are required to verify a cardholder verification value for the payment card.

" }, "DynamicCardVerificationCode":{ "shape":"DynamicCardVerificationCode", @@ -353,6 +378,10 @@ "DynamicCardVerificationValue":{ "shape":"DynamicCardVerificationValue", "documentation":"

Card data parameters that are required to verify CDynamic Card Verification Value (dCVV) for the payment card.

" + }, + "DiscoverDynamicCardVerificationCode":{ + "shape":"DiscoverDynamicCardVerificationCode", + "documentation":"

Card data parameters that are required to verify CDynamic Card Verification Code (dCVC) for the payment card.

" } }, "documentation":"

Card data parameters that are required to verify Card Verification Values (CVV/CVV2), Dynamic Card Verification Values (dCVV/dCVV2), or Card Security Codes (CSC).

", @@ -366,11 +395,11 @@ ], "members":{ "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" }, "ServiceCode":{ - "shape":"NumberLengthEquals3", + "shape":"ServiceCodeType", "documentation":"

The service code of the payment card. This is different from Card Security Code (CSC).

" } }, @@ -381,12 +410,19 @@ "required":["CardExpiryDate"], "members":{ "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" } }, "documentation":"

Card data parameters that are required to verify Card Verification Value (CVV2) for the payment card.

" }, + "CipherTextType":{ + "type":"string", + "max":4096, + "min":16, + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", + "sensitive":true + }, "CryptogramAuthResponse":{ "type":"structure", "members":{ @@ -422,33 +458,44 @@ "documentation":"

The data indicating whether the issuer approves or declines an online transaction using an EMV chip card.

" }, "ProprietaryAuthenticationData":{ - "shape":"HexLengthBetween1And16", + "shape":"ProprietaryAuthenticationDataType", "documentation":"

The proprietary authentication data used by issuer for communication during online transaction using an EMV chip card.

" } }, "documentation":"

Parameters that are required for ARPC response generation using method2 after ARQC verification is successful.

" }, + "DecimalizationTableType":{ + "type":"string", + "max":16, + "min":16, + "pattern":"[0-9]+", + "sensitive":true + }, "DecryptDataInput":{ "type":"structure", "required":[ + "KeyIdentifier", "CipherText", - "DecryptionAttributes", - "KeyIdentifier" + "DecryptionAttributes" ], "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for ciphertext decryption.

When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to perform the operation.

", + "location":"uri", + "locationName":"KeyIdentifier" + }, "CipherText":{ - "shape":"HexEvenLengthBetween16And4096", + "shape":"CipherTextType", "documentation":"

The ciphertext to decrypt.

" }, "DecryptionAttributes":{ "shape":"EncryptionDecryptionAttributes", "documentation":"

The encryption key type and attributes for ciphertext decryption.

" }, - "KeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for ciphertext decryption.

", - "location":"uri", - "locationName":"KeyIdentifier" + "WrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key for ciphertext decryption.

" } } }, @@ -469,7 +516,7 @@ "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, "PlainText":{ - "shape":"HexEvenLengthBetween16And4096", + "shape":"PlainTextOutputType", "documentation":"

The decrypted plaintext data in hexBinary format.

" } } @@ -477,22 +524,22 @@ "DiscoverDynamicCardVerificationCode":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", "CardExpiryDate", - "UnpredictableNumber" + "UnpredictableNumber", + "ApplicationTransactionCounter" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter value that comes from the terminal.

" - }, "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" }, "UnpredictableNumber":{ "shape":"HexLengthBetween2And8", "documentation":"

A random number that is generated by the issuer.

" + }, + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter value that comes from the terminal.

" } }, "documentation":"

Parameters that are required to generate or verify dCVC (Dynamic Card Verification Code).

" @@ -500,17 +547,17 @@ "DukptAttributes":{ "type":"structure", "required":[ - "DukptDerivationType", - "KeySerialNumber" + "KeySerialNumber", + "DukptDerivationType" ], "members":{ - "DukptDerivationType":{ - "shape":"DukptDerivationType", - "documentation":"

The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

" - }, "KeySerialNumber":{ "shape":"HexLengthBetween10And24", "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" + }, + "DukptDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

" } }, "documentation":"

Parameters that are used for Derived Unique Key Per Transaction (DUKPT) derivation algorithm.

" @@ -519,6 +566,10 @@ "type":"structure", "required":["KeySerialNumber"], "members":{ + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" + }, "DukptKeyDerivationType":{ "shape":"DukptDerivationType", "documentation":"

The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY

" @@ -526,10 +577,6 @@ "DukptKeyVariant":{ "shape":"DukptKeyVariant", "documentation":"

The type of use of DUKPT, which can be for incoming data decryption, outgoing data encryption, or both.

" - }, - "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", - "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" } }, "documentation":"

Parameters required for encryption or decryption of data using DUKPT.

" @@ -548,6 +595,14 @@ "type":"structure", "required":["KeySerialNumber"], "members":{ + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" + }, + "Mode":{ + "shape":"DukptEncryptionMode", + "documentation":"

The block cipher method to use for encryption.

The default is CBC.

" + }, "DukptKeyDerivationType":{ "shape":"DukptDerivationType", "documentation":"

The key type encrypted using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY

" @@ -557,16 +612,8 @@ "documentation":"

The type of use of DUKPT, which can be incoming data decryption, outgoing data encryption, or both.

" }, "InitializationVector":{ - "shape":"HexLength16Or32", + "shape":"InitializationVectorType", "documentation":"

An input used to provide the initial state. If no value is provided, Amazon Web Services Payment Cryptography defaults it to zero.

" - }, - "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", - "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" - }, - "Mode":{ - "shape":"DukptEncryptionMode", - "documentation":"

The block cipher method to use for encryption.

The default is CBC.

" } }, "documentation":"

Parameters that are required to encrypt plaintext data using DUKPT.

" @@ -589,27 +636,27 @@ "DynamicCardVerificationCode":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", + "UnpredictableNumber", "PanSequenceNumber", - "TrackData", - "UnpredictableNumber" + "ApplicationTransactionCounter", + "TrackData" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter value that comes from the terminal.

" + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

A random number generated by the issuer.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter value that comes from the terminal.

" + }, "TrackData":{ - "shape":"HexLengthBetween2And160", + "shape":"TrackDataType", "documentation":"

The data on the two tracks of magnetic cards used for financial transactions. This includes the cardholder name, PAN, expiration date, bank ID (BIN) and several other numbers the issuing bank uses to validate the data received.

" - }, - "UnpredictableNumber":{ - "shape":"HexLengthBetween2And8", - "documentation":"

A random number generated by the issuer.

" } }, "documentation":"

Parameters that are required to generate or verify Dynamic Card Verification Value (dCVV).

" @@ -617,27 +664,27 @@ "DynamicCardVerificationValue":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", - "CardExpiryDate", "PanSequenceNumber", - "ServiceCode" + "CardExpiryDate", + "ServiceCode", + "ApplicationTransactionCounter" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter value that comes from the terminal.

" + "PanSequenceNumber":{ + "shape":"NumberLengthEquals2", + "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, "CardExpiryDate":{ - "shape":"NumberLengthEquals4", + "shape":"CardExpiryDateType", "documentation":"

The expiry date of a payment card.

" }, - "PanSequenceNumber":{ - "shape":"HexLengthEquals2", - "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" - }, "ServiceCode":{ - "shape":"NumberLengthEquals3", + "shape":"ServiceCodeType", "documentation":"

The service code of the payment card. This is different from Card Security Code (CSC).

" + }, + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter value that comes from the terminal.

" } }, "documentation":"

Parameters that are required to generate or verify Dynamic Card Verification Value (dCVV).

" @@ -646,34 +693,34 @@ "type":"structure", "required":[ "MajorKeyDerivationMode", - "PanSequenceNumber", "PrimaryAccountNumber", + "PanSequenceNumber", "SessionDerivationData" ], "members":{ - "InitializationVector":{ - "shape":"HexLength16Or32", - "documentation":"

An input used to provide the initial state. If no value is provided, Amazon Web Services Payment Cryptography defaults it to zero.

" - }, "MajorKeyDerivationMode":{ "shape":"EmvMajorKeyDerivationMode", "documentation":"

The EMV derivation mode to use for ICC master key derivation as per EMV version 4.3 book 2.

" }, - "Mode":{ - "shape":"EmvEncryptionMode", - "documentation":"

The block cipher method to use for encryption.

" + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, - "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", - "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" - }, "SessionDerivationData":{ - "shape":"HexLengthEquals16", + "shape":"SessionDerivationDataType", "documentation":"

The derivation value used to derive the ICC session key. It is typically the application transaction counter value padded with zeros or previous ARQC value padded with zeros as per EMV version 4.3 book 2.

" + }, + "Mode":{ + "shape":"EmvEncryptionMode", + "documentation":"

The block cipher method to use for encryption.

" + }, + "InitializationVector":{ + "shape":"InitializationVectorType", + "documentation":"

An input used to provide the initial state. If no value is provided, Amazon Web Services Payment Cryptography defaults it to zero.

" } }, "documentation":"

Parameters for plaintext encryption using EMV keys.

" @@ -695,38 +742,38 @@ "EncryptDataInput":{ "type":"structure", "required":[ - "EncryptionAttributes", "KeyIdentifier", - "PlainText" + "PlainText", + "EncryptionAttributes" ], "members":{ - "EncryptionAttributes":{ - "shape":"EncryptionDecryptionAttributes", - "documentation":"

The encryption key type and attributes for plaintext encryption.

" - }, "KeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

", + "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to perform the operation.

", "location":"uri", "locationName":"KeyIdentifier" }, "PlainText":{ - "shape":"HexEvenLengthBetween16And4064", + "shape":"PlainTextType", "documentation":"

The plaintext to be encrypted.

For encryption using asymmetric keys, plaintext data length is constrained by encryption key strength that you define in KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide.

" + }, + "EncryptionAttributes":{ + "shape":"EncryptionDecryptionAttributes", + "documentation":"

The encryption key type and attributes for plaintext encryption.

" + }, + "WrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key for plaintext encryption.

" } } }, "EncryptDataOutput":{ "type":"structure", "required":[ - "CipherText", - "KeyArn" + "KeyArn", + "CipherText" ], "members":{ - "CipherText":{ - "shape":"HexEvenLengthBetween16And4096", - "documentation":"

The encrypted ciphertext.

" - }, "KeyArn":{ "shape":"KeyArn", "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

" @@ -734,21 +781,32 @@ "KeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" + }, + "CipherText":{ + "shape":"CipherTextType", + "documentation":"

The encrypted ciphertext.

" } } }, + "EncryptedPinBlockType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, "EncryptionDecryptionAttributes":{ "type":"structure", "members":{ + "Symmetric":{ + "shape":"SymmetricEncryptionAttributes", + "documentation":"

Parameters that are required to perform encryption and decryption using symmetric keys.

" + }, "Asymmetric":{"shape":"AsymmetricEncryptionAttributes"}, "Dukpt":{"shape":"DukptEncryptionAttributes"}, "Emv":{ "shape":"EmvEncryptionAttributes", "documentation":"

Parameters for plaintext encryption using EMV keys.

" - }, - "Symmetric":{ - "shape":"SymmetricEncryptionAttributes", - "documentation":"

Parameters that are required to perform encryption and decryption using symmetric keys.

" } }, "documentation":"

Parameters that are required to perform encryption and decryption operations.

", @@ -770,23 +828,23 @@ "GenerateCardValidationDataInput":{ "type":"structure", "required":[ - "GenerationAttributes", "KeyIdentifier", - "PrimaryAccountNumber" + "PrimaryAccountNumber", + "GenerationAttributes" ], "members":{ - "GenerationAttributes":{ - "shape":"CardGenerationAttributes", - "documentation":"

The algorithm for generating CVV or CSC values for the card within Amazon Web Services Payment Cryptography.

" - }, "KeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to generate card data.

" }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

" }, + "GenerationAttributes":{ + "shape":"CardGenerationAttributes", + "documentation":"

The algorithm for generating CVV or CSC values for the card within Amazon Web Services Payment Cryptography.

" + }, "ValidationDataLength":{ "shape":"IntegerRangeBetween3And5Type", "documentation":"

The length of the CVV or CSC to be generated. The default value is 3.

" @@ -810,7 +868,7 @@ "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, "ValidationData":{ - "shape":"NumberLengthBetween3And5", + "shape":"ValidationDataType", "documentation":"

The CVV or CSC value that Amazon Web Services Payment Cryptography generates for the card.

" } } @@ -818,26 +876,26 @@ "GenerateMacInput":{ "type":"structure", "required":[ - "GenerationAttributes", "KeyIdentifier", - "MessageData" + "MessageData", + "GenerationAttributes" ], "members":{ - "GenerationAttributes":{ - "shape":"MacAttributes", - "documentation":"

The attributes and data values to use for MAC generation within Amazon Web Services Payment Cryptography.

" - }, "KeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the MAC generation encryption key.

" }, + "MessageData":{ + "shape":"MessageDataType", + "documentation":"

The data for which a MAC is under generation. This value must be hexBinary.

" + }, + "GenerationAttributes":{ + "shape":"MacAttributes", + "documentation":"

The attributes and data values to use for MAC generation within Amazon Web Services Payment Cryptography.

" + }, "MacLength":{ "shape":"IntegerRangeBetween4And16", "documentation":"

The length of a MAC under generation.

" - }, - "MessageData":{ - "shape":"HexEvenLengthBetween2And4096", - "documentation":"

The data for which a MAC is under generation. This value must be hexBinary.

" } } }, @@ -858,7 +916,7 @@ "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, "Mac":{ - "shape":"HexLengthBetween4And128", + "shape":"MacOutputType", "documentation":"

The MAC cryptogram generated within Amazon Web Services Payment Cryptography.

" } } @@ -866,13 +924,17 @@ "GeneratePinDataInput":{ "type":"structure", "required":[ + "GenerationKeyIdentifier", "EncryptionKeyIdentifier", "GenerationAttributes", - "GenerationKeyIdentifier", - "PinBlockFormat", - "PrimaryAccountNumber" + "PrimaryAccountNumber", + "PinBlockFormat" ], "members":{ + "GenerationKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for pin data generation.

" + }, "EncryptionKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block.

" @@ -881,55 +943,51 @@ "shape":"PinGenerationAttributes", "documentation":"

The attributes and values to use for PIN, PVV, or PIN Offset generation.

" }, - "GenerationKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for pin data generation.

" - }, - "PinBlockFormat":{ - "shape":"PinBlockFormatForPinData", - "documentation":"

The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0 and ISO_Format_3.

The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

" - }, "PinDataLength":{ "shape":"IntegerRangeBetween4And12", "documentation":"

The length of PIN under generation.

" }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

" + }, + "PinBlockFormat":{ + "shape":"PinBlockFormatForPinData", + "documentation":"

The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0 and ISO_Format_3.

The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

" } } }, "GeneratePinDataOutput":{ "type":"structure", "required":[ - "EncryptedPinBlock", - "EncryptionKeyArn", - "EncryptionKeyCheckValue", "GenerationKeyArn", "GenerationKeyCheckValue", + "EncryptionKeyArn", + "EncryptionKeyCheckValue", + "EncryptedPinBlock", "PinData" ], "members":{ - "EncryptedPinBlock":{ - "shape":"HexLengthBetween16And32", - "documentation":"

The PIN block encrypted under PEK from Amazon Web Services Payment Cryptography. The encrypted PIN block is a composite of PAN (Primary Account Number) and PIN (Personal Identification Number), generated in accordance with ISO 9564 standard.

" - }, - "EncryptionKeyArn":{ + "GenerationKeyArn":{ "shape":"KeyArn", - "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

" + "documentation":"

The keyARN of the pin data generation key that Amazon Web Services Payment Cryptography uses for PIN, PVV or PIN Offset generation.

" }, - "EncryptionKeyCheckValue":{ + "GenerationKeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, - "GenerationKeyArn":{ + "EncryptionKeyArn":{ "shape":"KeyArn", - "documentation":"

The keyARN of the pin data generation key that Amazon Web Services Payment Cryptography uses for PIN, PVV or PIN Offset generation.

" + "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

" }, - "GenerationKeyCheckValue":{ + "EncryptionKeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, + "EncryptedPinBlock":{ + "shape":"EncryptedPinBlockType", + "documentation":"

The PIN block encrypted under PEK from Amazon Web Services Payment Cryptography. The encrypted PIN block is a composite of PAN (Primary Account Number) and PIN (Personal Identification Number), generated in accordance with ISO 9564 standard.

" + }, "PinData":{ "shape":"PinData", "documentation":"

The attributes and values Amazon Web Services Payment Cryptography uses for pin data generation.

" @@ -940,141 +998,64 @@ "type":"string", "max":32, "min":16, - "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", - "sensitive":true - }, - "HexEvenLengthBetween16And4064":{ - "type":"string", - "max":4064, - "min":16, - "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", - "sensitive":true - }, - "HexEvenLengthBetween16And4096":{ - "type":"string", - "max":4096, - "min":16, - "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", - "sensitive":true - }, - "HexEvenLengthBetween2And4096":{ - "type":"string", - "max":4096, - "min":2, - "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", - "sensitive":true - }, - "HexEvenLengthBetween4And128":{ - "type":"string", - "max":128, - "min":4, - "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", - "sensitive":true - }, - "HexLength16Or32":{ - "type":"string", - "max":32, - "min":16, - "pattern":"^(?:[0-9a-fA-F]{16}|[0-9a-fA-F]{32})$", + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", "sensitive":true }, "HexLengthBetween10And24":{ "type":"string", "max":24, "min":10, - "pattern":"^[0-9a-fA-F]+$" - }, - "HexLengthBetween16And32":{ - "type":"string", - "max":32, - "min":16, - "pattern":"^[0-9a-fA-F]+$" - }, - "HexLengthBetween1And16":{ - "type":"string", - "max":16, - "min":1, - "pattern":"^[0-9a-fA-F]+$" - }, - "HexLengthBetween2And1024":{ - "type":"string", - "max":1024, - "min":2, - "pattern":"^[0-9a-fA-F]+$" - }, - "HexLengthBetween2And160":{ - "type":"string", - "max":160, - "min":2, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" }, "HexLengthBetween2And4":{ "type":"string", "max":4, "min":2, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" }, "HexLengthBetween2And8":{ "type":"string", "max":8, "min":2, - "pattern":"^[0-9a-fA-F]+$" - }, - "HexLengthBetween4And128":{ - "type":"string", - "max":128, - "min":4, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" }, "HexLengthEquals1":{ "type":"string", "max":1, "min":1, - "pattern":"^[0-9A-F]+$" - }, - "HexLengthEquals16":{ - "type":"string", - "max":16, - "min":16, - "pattern":"^[0-9a-fA-F]+$" - }, - 
"HexLengthEquals2":{ - "type":"string", - "max":2, - "min":2, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9A-F]+" }, "HexLengthEquals4":{ "type":"string", "max":4, "min":4, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" }, "HexLengthEquals8":{ "type":"string", "max":8, "min":8, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" }, "Ibm3624NaturalPin":{ "type":"structure", "required":[ "DecimalizationTable", - "PinValidationData", - "PinValidationDataPadCharacter" + "PinValidationDataPadCharacter", + "PinValidationData" ], "members":{ "DecimalizationTable":{ - "shape":"NumberLengthEquals16", + "shape":"DecimalizationTableType", "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" }, - "PinValidationData":{ - "shape":"NumberLengthBetween4And16", - "documentation":"

The unique data for cardholder identification.

" - }, "PinValidationDataPadCharacter":{ "shape":"HexLengthEquals1", "documentation":"

The padding character for validation data.

" + }, + "PinValidationData":{ + "shape":"PinValidationDataType", + "documentation":"

The unique data for cardholder identification.

" } }, "documentation":"

Parameters that are required to generate or verify Ibm3624 natural PIN.

" @@ -1083,26 +1064,26 @@ "type":"structure", "required":[ "DecimalizationTable", - "PinOffset", + "PinValidationDataPadCharacter", "PinValidationData", - "PinValidationDataPadCharacter" + "PinOffset" ], "members":{ "DecimalizationTable":{ - "shape":"NumberLengthEquals16", + "shape":"DecimalizationTableType", "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" }, - "PinOffset":{ - "shape":"NumberLengthBetween4And12", - "documentation":"

The PIN offset value.

" + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

The padding character for validation data.

" }, "PinValidationData":{ - "shape":"NumberLengthBetween4And16", + "shape":"PinValidationDataType", "documentation":"

The unique data for cardholder identification.

" }, - "PinValidationDataPadCharacter":{ - "shape":"HexLengthEquals1", - "documentation":"

The padding character for validation data.

" + "PinOffset":{ + "shape":"PinOffsetType", + "documentation":"

The PIN offset value.

" } }, "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN from offset PIN.

" @@ -1110,27 +1091,27 @@ "Ibm3624PinOffset":{ "type":"structure", "required":[ - "DecimalizationTable", "EncryptedPinBlock", - "PinValidationData", - "PinValidationDataPadCharacter" + "DecimalizationTable", + "PinValidationDataPadCharacter", + "PinValidationData" ], "members":{ - "DecimalizationTable":{ - "shape":"NumberLengthEquals16", - "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" - }, "EncryptedPinBlock":{ - "shape":"HexLengthBetween16And32", + "shape":"EncryptedPinBlockType", "documentation":"

The encrypted PIN block data. According to ISO 9564 standard, a PIN Block is an encoded representation of a payment card Personal Account Number (PAN) and the cardholder Personal Identification Number (PIN).

" }, - "PinValidationData":{ - "shape":"NumberLengthBetween4And16", - "documentation":"

The unique data for cardholder identification.

" + "DecimalizationTable":{ + "shape":"DecimalizationTableType", + "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" }, "PinValidationDataPadCharacter":{ "shape":"HexLengthEquals1", "documentation":"

The padding character for validation data.

" + }, + "PinValidationData":{ + "shape":"PinValidationDataType", + "documentation":"

The unique data for cardholder identification.

" } }, "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN offset PIN.

" @@ -1139,26 +1120,26 @@ "type":"structure", "required":[ "DecimalizationTable", - "PinOffset", + "PinValidationDataPadCharacter", "PinValidationData", - "PinValidationDataPadCharacter" + "PinOffset" ], "members":{ "DecimalizationTable":{ - "shape":"NumberLengthEquals16", + "shape":"DecimalizationTableType", "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" }, - "PinOffset":{ - "shape":"NumberLengthBetween4And12", - "documentation":"

The PIN offset value.

" + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

The padding character for validation data.

" }, "PinValidationData":{ - "shape":"NumberLengthBetween4And16", + "shape":"PinValidationDataType", "documentation":"

The unique data for cardholder identification.

" }, - "PinValidationDataPadCharacter":{ - "shape":"HexLengthEquals1", - "documentation":"

The padding character for validation data.

" + "PinOffset":{ + "shape":"PinOffsetType", + "documentation":"

The PIN offset value.

" } }, "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN verification PIN.

" @@ -1167,29 +1148,36 @@ "type":"structure", "required":[ "DecimalizationTable", - "PinValidationData", - "PinValidationDataPadCharacter" + "PinValidationDataPadCharacter", + "PinValidationData" ], "members":{ "DecimalizationTable":{ - "shape":"NumberLengthEquals16", + "shape":"DecimalizationTableType", "documentation":"

The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

" }, - "PinValidationData":{ - "shape":"NumberLengthBetween4And16", - "documentation":"

The unique data for cardholder identification.

" - }, "PinValidationDataPadCharacter":{ "shape":"HexLengthEquals1", "documentation":"

The padding character for validation data.

" + }, + "PinValidationData":{ + "shape":"PinValidationDataType", + "documentation":"

The unique data for cardholder identification.

" } }, "documentation":"

Parameters that are required to generate or verify Ibm3624 random PIN.

" }, - "IntegerRangeBetween0And9":{ + "InitializationVectorType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"(?:[0-9a-fA-F]{16}|[0-9a-fA-F]{32})", + "sensitive":true + }, + "IntegerRangeBetween0And6":{ "type":"integer", "box":true, - "max":9, + "max":6, "min":0 }, "IntegerRangeBetween3And5Type":{ @@ -1224,19 +1212,26 @@ "type":"string", "max":150, "min":70, - "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:key/[0-9a-zA-Z]{16,64}$" + "pattern":"arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:key/[0-9a-zA-Z]{16,64}" }, "KeyArnOrKeyAliasType":{ "type":"string", "max":322, "min":7, - "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:(key/[0-9a-zA-Z]{16,64}|alias/[a-zA-Z0-9/_-]+)$|^alias/[a-zA-Z0-9/_-]+$" + "pattern":"arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:(key/[0-9a-zA-Z]{16,64}|alias/[a-zA-Z0-9/_-]+)$|^alias/[a-zA-Z0-9/_-]+" }, "KeyCheckValue":{ "type":"string", "max":16, "min":4, - "pattern":"^[0-9a-fA-F]+$" + "pattern":"[0-9a-fA-F]+" + }, + "KeyCheckValueAlgorithm":{ + "type":"string", + "enum":[ + "CMAC", + "ANSI_X9_24" + ] }, "MacAlgorithm":{ "type":"string", @@ -1253,21 +1248,21 @@ "MacAlgorithmDukpt":{ "type":"structure", "required":[ - "DukptKeyVariant", - "KeySerialNumber" + "KeySerialNumber", + "DukptKeyVariant" ], "members":{ - "DukptDerivationType":{ - "shape":"DukptDerivationType", - "documentation":"

The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

" + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" }, "DukptKeyVariant":{ "shape":"DukptKeyVariant", "documentation":"

The type of use of DUKPT, which can be MAC generation, MAC verification, or both.

" }, - "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", - "documentation":"

The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

" + "DukptDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

" } }, "documentation":"

Parameters required for DUKPT MAC generation and verification.

" @@ -1276,8 +1271,8 @@ "type":"structure", "required":[ "MajorKeyDerivationMode", - "PanSequenceNumber", "PrimaryAccountNumber", + "PanSequenceNumber", "SessionKeyDerivationMode", "SessionKeyDerivationValue" ], @@ -1286,14 +1281,14 @@ "shape":"MajorKeyDerivationMode", "documentation":"

The method to use when deriving the master key for EMV MAC generation or verification.

" }, - "PanSequenceNumber":{ - "shape":"HexLengthEquals2", - "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" - }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" }, + "PanSequenceNumber":{ + "shape":"NumberLengthEquals2", + "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" + }, "SessionKeyDerivationMode":{ "shape":"SessionKeyDerivationMode", "documentation":"

The method of deriving a session key for EMV MAC generation or verification.

" @@ -1312,9 +1307,9 @@ "shape":"MacAlgorithm", "documentation":"

The encryption algorithm for MAC generation or verification.

" }, - "DukptCmac":{ - "shape":"MacAlgorithmDukpt", - "documentation":"

Parameters that are required for MAC generation or verification using DUKPT CMAC algorithm.

" + "EmvMac":{ + "shape":"MacAlgorithmEmv", + "documentation":"

Parameters that are required for MAC generation or verification using EMV MAC algorithm.

" }, "DukptIso9797Algorithm1":{ "shape":"MacAlgorithmDukpt", @@ -1324,63 +1319,47 @@ "shape":"MacAlgorithmDukpt", "documentation":"

Parameters that are required for MAC generation or verification using DUKPT ISO 9797 algorithm3.

" }, - "EmvMac":{ - "shape":"MacAlgorithmEmv", - "documentation":"

Parameters that are required for MAC generation or verification using EMV MAC algorithm.

" + "DukptCmac":{ + "shape":"MacAlgorithmDukpt", + "documentation":"

Parameters that are required for MAC generation or verification using DUKPT CMAC algorithm.

" } }, "documentation":"

Parameters that are required for DUKPT, HMAC, or EMV MAC generation or verification.

", "union":true }, - "MajorKeyDerivationMode":{ - "type":"string", - "enum":[ - "EMV_OPTION_A", - "EMV_OPTION_B" - ] - }, - "NumberLengthBetween12And19":{ + "MacOutputType":{ "type":"string", - "max":19, - "min":12, - "pattern":"^[0-9]+$", - "sensitive":true - }, - "NumberLengthBetween3And5":{ - "type":"string", - "max":5, - "min":3, - "pattern":"^[0-9]+$" - }, - "NumberLengthBetween4And12":{ - "type":"string", - "max":12, + "max":128, "min":4, - "pattern":"^[0-9]+$" + "pattern":"[0-9a-fA-F]+", + "sensitive":true }, - "NumberLengthBetween4And16":{ + "MacType":{ "type":"string", - "max":16, + "max":128, "min":4, - "pattern":"^[0-9]+$" + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", + "sensitive":true }, - "NumberLengthEquals16":{ + "MajorKeyDerivationMode":{ "type":"string", - "max":16, - "min":16, - "pattern":"^[0-9]+$" + "enum":[ + "EMV_OPTION_A", + "EMV_OPTION_B" + ] }, - "NumberLengthEquals3":{ + "MessageDataType":{ "type":"string", - "max":3, - "min":3, - "pattern":"^[0-9]+$" + "max":4096, + "min":2, + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", + "sensitive":true }, - "NumberLengthEquals4":{ + "NumberLengthEquals2":{ "type":"string", - "max":4, - "min":4, - "pattern":"^[0-9]+$" + "max":2, + "min":2, + "pattern":"[0-9]+" }, "PaddingType":{ "type":"string", @@ -1402,11 +1381,11 @@ "type":"structure", "members":{ "PinOffset":{ - "shape":"NumberLengthBetween4And12", + "shape":"PinOffsetType", "documentation":"

The PIN offset value.

" }, "VerificationValue":{ - "shape":"NumberLengthBetween4And12", + "shape":"VerificationValueType", "documentation":"

The unique data to identify a cardholder. In most cases, this is the same as cardholder's Primary Account Number (PAN). If a value is not provided, it defaults to PAN.

" } }, @@ -1416,95 +1395,141 @@ "PinGenerationAttributes":{ "type":"structure", "members":{ - "Ibm3624NaturalPin":{ - "shape":"Ibm3624NaturalPin", - "documentation":"

Parameters that are required to generate or verify Ibm3624 natural PIN.

" + "VisaPin":{ + "shape":"VisaPin", + "documentation":"

Parameters that are required to generate or verify Visa PIN.

" }, - "Ibm3624PinFromOffset":{ - "shape":"Ibm3624PinFromOffset", - "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN from offset PIN.

" + "VisaPinVerificationValue":{ + "shape":"VisaPinVerificationValue", + "documentation":"

Parameters that are required to generate or verify Visa PIN Verification Value (PVV).

" }, "Ibm3624PinOffset":{ "shape":"Ibm3624PinOffset", "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN offset PIN.

" }, + "Ibm3624NaturalPin":{ + "shape":"Ibm3624NaturalPin", + "documentation":"

Parameters that are required to generate or verify Ibm3624 natural PIN.

" + }, "Ibm3624RandomPin":{ "shape":"Ibm3624RandomPin", "documentation":"

Parameters that are required to generate or verify Ibm3624 random PIN.

" }, - "VisaPin":{ - "shape":"VisaPin", - "documentation":"

Parameters that are required to generate or verify Visa PIN.

" - }, - "VisaPinVerificationValue":{ - "shape":"VisaPinVerificationValue", - "documentation":"

Parameters that are required to generate or verify Visa PIN Verification Value (PVV).

" + "Ibm3624PinFromOffset":{ + "shape":"Ibm3624PinFromOffset", + "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN from offset PIN.

" } }, "documentation":"

Parameters that are required for PIN data generation.

", "union":true }, + "PinOffsetType":{ + "type":"string", + "max":12, + "min":4, + "pattern":"[0-9]+", + "sensitive":true + }, + "PinValidationDataType":{ + "type":"string", + "max":16, + "min":4, + "pattern":"[0-9]+", + "sensitive":true + }, "PinVerificationAttributes":{ "type":"structure", "members":{ - "Ibm3624Pin":{ - "shape":"Ibm3624PinVerification", - "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN.

" - }, "VisaPin":{ "shape":"VisaPinVerification", "documentation":"

Parameters that are required to generate or verify Visa PIN.

" + }, + "Ibm3624Pin":{ + "shape":"Ibm3624PinVerification", + "documentation":"

Parameters that are required to generate or verify Ibm3624 PIN.

" } }, "documentation":"

Parameters that are required for PIN data verification.

", "union":true }, + "PlainTextOutputType":{ + "type":"string", + "max":4096, + "min":16, + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", + "sensitive":true + }, + "PlainTextType":{ + "type":"string", + "max":4064, + "min":16, + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", + "sensitive":true + }, + "PrimaryAccountNumberType":{ + "type":"string", + "max":19, + "min":12, + "pattern":"[0-9]+", + "sensitive":true + }, + "ProprietaryAuthenticationDataType":{ + "type":"string", + "max":16, + "min":1, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, "ReEncryptDataInput":{ "type":"structure", "required":[ + "IncomingKeyIdentifier", + "OutgoingKeyIdentifier", "CipherText", "IncomingEncryptionAttributes", - "IncomingKeyIdentifier", - "OutgoingEncryptionAttributes", - "OutgoingKeyIdentifier" + "OutgoingEncryptionAttributes" ], "members":{ + "IncomingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the encryption key of incoming ciphertext data.

When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to perform the operation.

", + "location":"uri", + "locationName":"IncomingKeyIdentifier" + }, + "OutgoingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

" + }, "CipherText":{ - "shape":"HexEvenLengthBetween16And4096", + "shape":"CipherTextType", "documentation":"

Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes.

" }, "IncomingEncryptionAttributes":{ "shape":"ReEncryptionAttributes", "documentation":"

The attributes and values for incoming ciphertext.

" }, - "IncomingKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key of incoming ciphertext data.

", - "location":"uri", - "locationName":"IncomingKeyIdentifier" - }, "OutgoingEncryptionAttributes":{ "shape":"ReEncryptionAttributes", "documentation":"

The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

" }, - "OutgoingKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

" + "IncomingWrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key of incoming ciphertext data.

" + }, + "OutgoingWrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

" } } }, "ReEncryptDataOutput":{ "type":"structure", "required":[ - "CipherText", "KeyArn", - "KeyCheckValue" + "KeyCheckValue", + "CipherText" ], "members":{ - "CipherText":{ - "shape":"HexEvenLengthBetween16And4096", - "documentation":"

The encrypted ciphertext.

" - }, "KeyArn":{ "shape":"KeyArn", "documentation":"

The keyARN (Amazon Resource Name) of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

" @@ -1512,17 +1537,21 @@ "KeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" + }, + "CipherText":{ + "shape":"CipherTextType", + "documentation":"

The encrypted ciphertext.

" } } }, "ReEncryptionAttributes":{ "type":"structure", "members":{ - "Dukpt":{"shape":"DukptEncryptionAttributes"}, "Symmetric":{ "shape":"SymmetricEncryptionAttributes", "documentation":"

Parameters that are required to encrypt data using symmetric keys.

" - } + }, + "Dukpt":{"shape":"DukptEncryptionAttributes"} }, "documentation":"

Parameters that are required to perform reencryption operation.

", "union":true @@ -1542,20 +1571,34 @@ }, "exception":true }, + "ServiceCodeType":{ + "type":"string", + "max":3, + "min":3, + "pattern":"[0-9]+", + "sensitive":true + }, + "SessionDerivationDataType":{ + "type":"string", + "max":16, + "min":16, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, "SessionKeyAmex":{ "type":"structure", "required":[ - "PanSequenceNumber", - "PrimaryAccountNumber" + "PrimaryAccountNumber", + "PanSequenceNumber" ], "members":{ - "PanSequenceNumber":{ - "shape":"HexLengthEquals2", - "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" - }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + }, + "PanSequenceNumber":{ + "shape":"NumberLengthEquals2", + "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" } }, "documentation":"

Parameters to derive session key for an Amex payment card.

" @@ -1563,14 +1606,6 @@ "SessionKeyDerivation":{ "type":"structure", "members":{ - "Amex":{ - "shape":"SessionKeyAmex", - "documentation":"

Parameters to derive session key for an Amex payment card for ARQC verification.

" - }, - "Emv2000":{ - "shape":"SessionKeyEmv2000", - "documentation":"

Parameters to derive session key for an Emv2000 payment card for ARQC verification.

" - }, "EmvCommon":{ "shape":"SessionKeyEmvCommon", "documentation":"

Parameters to derive session key for an Emv common payment card for ARQC verification.

" @@ -1579,6 +1614,14 @@ "shape":"SessionKeyMastercard", "documentation":"

Parameters to derive session key for a Mastercard payment card for ARQC verification.

" }, + "Emv2000":{ + "shape":"SessionKeyEmv2000", + "documentation":"

Parameters to derive session key for an Emv2000 payment card for ARQC verification.

" + }, + "Amex":{ + "shape":"SessionKeyAmex", + "documentation":"

Parameters to derive session key for an Amex payment card for ARQC verification.

" + }, "Visa":{ "shape":"SessionKeyVisa", "documentation":"

Parameters to derive session key for a Visa payment card for ARQC verification.

" @@ -1601,7 +1644,7 @@ "type":"structure", "members":{ "ApplicationCryptogram":{ - "shape":"HexLengthEquals16", + "shape":"ApplicationCryptogramType", "documentation":"

The cryptogram provided by the terminal during transaction processing.

" }, "ApplicationTransactionCounter":{ @@ -1615,22 +1658,22 @@ "SessionKeyEmv2000":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", + "PrimaryAccountNumber", "PanSequenceNumber", - "PrimaryAccountNumber" + "ApplicationTransactionCounter" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, - "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", - "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" } }, "documentation":"

Parameters to derive session key for an Emv2000 payment card for ARQC verification.

" @@ -1638,22 +1681,22 @@ "SessionKeyEmvCommon":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", + "PrimaryAccountNumber", "PanSequenceNumber", - "PrimaryAccountNumber" + "ApplicationTransactionCounter" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, - "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", - "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" } }, "documentation":"

Parameters to derive session key for an Emv common payment card for ARQC verification.

" @@ -1661,23 +1704,23 @@ "SessionKeyMastercard":{ "type":"structure", "required":[ - "ApplicationTransactionCounter", - "PanSequenceNumber", "PrimaryAccountNumber", + "PanSequenceNumber", + "ApplicationTransactionCounter", "UnpredictableNumber" ], "members":{ - "ApplicationTransactionCounter":{ - "shape":"HexLengthBetween2And4", - "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" }, "PanSequenceNumber":{ - "shape":"HexLengthEquals2", + "shape":"NumberLengthEquals2", "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" }, - "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", - "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

The transaction counter that is provided by the terminal during transaction processing.

" }, "UnpredictableNumber":{ "shape":"HexLengthBetween2And8", @@ -1689,17 +1732,17 @@ "SessionKeyVisa":{ "type":"structure", "required":[ - "PanSequenceNumber", - "PrimaryAccountNumber" + "PrimaryAccountNumber", + "PanSequenceNumber" ], "members":{ - "PanSequenceNumber":{ - "shape":"HexLengthEquals2", - "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" - }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + }, + "PanSequenceNumber":{ + "shape":"NumberLengthEquals2", + "documentation":"

A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

" } }, "documentation":"

Parameters to derive session key for Visa payment card for ARQC verification.

" @@ -1709,14 +1752,14 @@ "type":"structure", "required":["Mode"], "members":{ - "InitializationVector":{ - "shape":"HexLength16Or32", - "documentation":"

An input used to provide the intial state. If no value is provided, Amazon Web Services Payment Cryptography defaults it to zero.

" - }, "Mode":{ "shape":"EncryptionMode", "documentation":"

The block cipher method to use for encryption.

" }, + "InitializationVector":{ + "shape":"InitializationVectorType", + "documentation":"

An input used to provide the initial state. If no value is provided, Amazon Web Services Payment Cryptography defaults it to zero.

" + }, "PaddingType":{ "shape":"PaddingType", "documentation":"

The padding to be included with the data.

" @@ -1736,16 +1779,53 @@ }, "exception":true }, + "Tr31WrappedKeyBlock":{ + "type":"string", + "max":9984, + "min":56, + "pattern":"[0-9A-Z]+", + "sensitive":true + }, + "TrackDataType":{ + "type":"string", + "max":160, + "min":2, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, + "TransactionDataType":{ + "type":"string", + "max":1024, + "min":2, + "pattern":"[0-9a-fA-F]+", + "sensitive":true + }, "TranslatePinDataInput":{ "type":"structure", "required":[ - "EncryptedPinBlock", "IncomingKeyIdentifier", - "IncomingTranslationAttributes", "OutgoingKeyIdentifier", - "OutgoingTranslationAttributes" + "IncomingTranslationAttributes", + "OutgoingTranslationAttributes", + "EncryptedPinBlock" ], "members":{ + "IncomingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK.

When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key for PIN block. Otherwise, it is the key identifier used to perform the operation.

" + }, + "OutgoingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK.

" + }, + "IncomingTranslationAttributes":{ + "shape":"TranslationIsoFormats", + "documentation":"

The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography.

" + }, + "OutgoingTranslationAttributes":{ + "shape":"TranslationIsoFormats", + "documentation":"

The format of the outgoing PIN block data after translation by Amazon Web Services Payment Cryptography.

" + }, "EncryptedPinBlock":{ "shape":"HexEvenLengthBetween16And32", "documentation":"

The encrypted PIN block data that Amazon Web Services Payment Cryptography translates.

" @@ -1754,36 +1834,32 @@ "shape":"DukptDerivationAttributes", "documentation":"

The attributes and values to use for incoming DUKPT encryption key for PIN block translation.

" }, - "IncomingKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK.

" - }, - "IncomingTranslationAttributes":{ - "shape":"TranslationIsoFormats", - "documentation":"

The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography.

" - }, "OutgoingDukptAttributes":{ "shape":"DukptDerivationAttributes", "documentation":"

The attributes and values to use for outgoing DUKPT encryption key after PIN block translation.

" }, - "OutgoingKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK.

" + "IncomingWrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key under which incoming PIN block data is encrypted.

" }, - "OutgoingTranslationAttributes":{ - "shape":"TranslationIsoFormats", - "documentation":"

The format of the outgoing PIN block data after translation by Amazon Web Services Payment Cryptography.

" + "OutgoingWrappedKey":{ + "shape":"WrappedKey", + "documentation":"

The WrappedKeyBlock containing the encryption key for encrypting outgoing PIN block data.

" } } }, "TranslatePinDataOutput":{ "type":"structure", "required":[ + "PinBlock", "KeyArn", - "KeyCheckValue", - "PinBlock" + "KeyCheckValue" ], "members":{ + "PinBlock":{ + "shape":"EncryptedPinBlockType", + "documentation":"

The outgoing encrypted PIN block data after translation.

" + }, "KeyArn":{ "shape":"KeyArn", "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses to encrypt outgoing PIN block data after translation.

" @@ -1791,10 +1867,6 @@ "KeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" - }, - "PinBlock":{ - "shape":"HexLengthBetween16And32", - "documentation":"

The outgoing encrypted PIN block data after translation.

" } } }, @@ -1826,7 +1898,7 @@ "required":["PrimaryAccountNumber"], "members":{ "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" } }, @@ -1838,15 +1910,22 @@ }, "documentation":"

Parameters that are required for ISO9564 PIN format 1 tranlation.

" }, + "ValidationDataType":{ + "type":"string", + "max":5, + "min":3, + "pattern":"[0-9]+", + "sensitive":true + }, "ValidationException":{ "type":"structure", "required":["message"], "members":{ + "message":{"shape":"String"}, "fieldList":{ "shape":"ValidationExceptionFieldList", "documentation":"

The request was denied due to an invalid request error.

" - }, - "message":{"shape":"String"} + } }, "documentation":"

The request was denied due to an invalid request error.

", "exception":true @@ -1854,15 +1933,15 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "message", - "path" + "path", + "message" ], "members":{ - "message":{ + "path":{ "shape":"String", "documentation":"

The request was denied due to an invalid request error.

" }, - "path":{ + "message":{ "shape":"String", "documentation":"

The request was denied due to an invalid request error.

" } @@ -1876,15 +1955,15 @@ "VerificationFailedException":{ "type":"structure", "required":[ - "Message", - "Reason" + "Reason", + "Message" ], "members":{ - "Message":{"shape":"String"}, "Reason":{ "shape":"VerificationFailedReason", "documentation":"

The reason for the exception.

" - } + }, + "Message":{"shape":"String"} }, "documentation":"

This request failed verification.

", "error":{ @@ -1902,28 +1981,35 @@ "INVALID_AUTH_REQUEST_CRYPTOGRAM" ] }, + "VerificationValueType":{ + "type":"string", + "max":12, + "min":4, + "pattern":"[0-9]+", + "sensitive":true + }, "VerifyAuthRequestCryptogramInput":{ "type":"structure", "required":[ - "AuthRequestCryptogram", "KeyIdentifier", + "TransactionData", + "AuthRequestCryptogram", "MajorKeyDerivationMode", - "SessionKeyDerivationAttributes", - "TransactionData" + "SessionKeyDerivationAttributes" ], "members":{ - "AuthRequestCryptogram":{ - "shape":"HexLengthEquals16", - "documentation":"

The auth request cryptogram imported into Amazon Web Services Payment Cryptography for ARQC verification using a major encryption key and transaction data.

" - }, - "AuthResponseAttributes":{ - "shape":"CryptogramAuthResponse", - "documentation":"

The attributes and values for auth request cryptogram verification. These parameters are required in case using ARPC Method 1 or Method 2 for ARQC verification.

" - }, "KeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the major encryption key that Amazon Web Services Payment Cryptography uses for ARQC verification.

" }, + "TransactionData":{ + "shape":"TransactionDataType", + "documentation":"

The transaction data that Amazon Web Services Payment Cryptography uses for ARQC verification. The same transaction is used for ARQC generation outside of Amazon Web Services Payment Cryptography.

" + }, + "AuthRequestCryptogram":{ + "shape":"AuthRequestCryptogramType", + "documentation":"

The auth request cryptogram imported into Amazon Web Services Payment Cryptography for ARQC verification using a major encryption key and transaction data.

" + }, "MajorKeyDerivationMode":{ "shape":"MajorKeyDerivationMode", "documentation":"

The method to use when deriving the major encryption key for ARQC verification within Amazon Web Services Payment Cryptography. The same key derivation mode was used for ARQC generation outside of Amazon Web Services Payment Cryptography.

" @@ -1932,9 +2018,9 @@ "shape":"SessionKeyDerivation", "documentation":"

The attributes and values to use for deriving a session key for ARQC verification within Amazon Web Services Payment Cryptography. The same attributes were used for ARQC generation outside of Amazon Web Services Payment Cryptography.

" }, - "TransactionData":{ - "shape":"HexLengthBetween2And1024", - "documentation":"

The transaction data that Amazon Web Services Payment Cryptography uses for ARQC verification. The same transaction is used for ARQC generation outside of Amazon Web Services Payment Cryptography.

" + "AuthResponseAttributes":{ + "shape":"CryptogramAuthResponse", + "documentation":"

The attributes and values for auth request cryptogram verification. These parameters are required in case using ARPC Method 1 or Method 2 for ARQC verification.

" } } }, @@ -1945,10 +2031,6 @@ "KeyCheckValue" ], "members":{ - "AuthResponseValue":{ - "shape":"HexLengthBetween1And16", - "documentation":"

The result for ARQC verification or ARPC generation within Amazon Web Services Payment Cryptography.

" - }, "KeyArn":{ "shape":"KeyArn", "documentation":"

The keyARN of the major encryption key that Amazon Web Services Payment Cryptography uses for ARQC verification.

" @@ -1956,6 +2038,10 @@ "KeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" + }, + "AuthResponseValue":{ + "shape":"AuthResponseValueType", + "documentation":"

The result for ARQC verification or ARPC generation within Amazon Web Services Payment Cryptography.

" } } }, @@ -1964,8 +2050,8 @@ "required":[ "KeyIdentifier", "PrimaryAccountNumber", - "ValidationData", - "VerificationAttributes" + "VerificationAttributes", + "ValidationData" ], "members":{ "KeyIdentifier":{ @@ -1973,16 +2059,16 @@ "documentation":"

The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to verify card data.

" }, "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", + "shape":"PrimaryAccountNumberType", "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

" }, - "ValidationData":{ - "shape":"NumberLengthBetween3And5", - "documentation":"

The CVV or CSC value for use for card data verification within Amazon Web Services Payment Cryptography.

" - }, "VerificationAttributes":{ "shape":"CardVerificationAttributes", "documentation":"

The algorithm to use for verification of card data within Amazon Web Services Payment Cryptography.

" + }, + "ValidationData":{ + "shape":"ValidationDataType", + "documentation":"

The CVV or CSC value for use for card data verification within Amazon Web Services Payment Cryptography.

" } } }, @@ -2007,8 +2093,8 @@ "type":"structure", "required":[ "KeyIdentifier", - "Mac", "MessageData", + "Mac", "VerificationAttributes" ], "members":{ @@ -2016,21 +2102,21 @@ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses to verify MAC data.

" }, - "Mac":{ - "shape":"HexEvenLengthBetween4And128", - "documentation":"

The MAC being verified.

" - }, - "MacLength":{ - "shape":"IntegerRangeBetween4And16", - "documentation":"

The length of the MAC.

" - }, "MessageData":{ - "shape":"HexEvenLengthBetween2And4096", + "shape":"MessageDataType", "documentation":"

The data on for which MAC is under verification. This value must be hexBinary.

" }, + "Mac":{ + "shape":"MacType", + "documentation":"

The MAC being verified.

" + }, "VerificationAttributes":{ "shape":"MacAttributes", "documentation":"

The attributes and data values to use for MAC verification within Amazon Web Services Payment Cryptography.

" + }, + "MacLength":{ + "shape":"IntegerRangeBetween4And16", + "documentation":"

The length of the MAC.

" } } }, @@ -2054,26 +2140,34 @@ "VerifyPinDataInput":{ "type":"structure", "required":[ - "EncryptedPinBlock", + "VerificationKeyIdentifier", "EncryptionKeyIdentifier", - "PinBlockFormat", - "PrimaryAccountNumber", "VerificationAttributes", - "VerificationKeyIdentifier" + "EncryptedPinBlock", + "PrimaryAccountNumber", + "PinBlockFormat" ], "members":{ - "DukptAttributes":{ - "shape":"DukptAttributes", - "documentation":"

The attributes and values for the DUKPT encrypted PIN block data.

" - }, - "EncryptedPinBlock":{ - "shape":"HexLengthBetween16And32", - "documentation":"

The encrypted PIN block data that Amazon Web Services Payment Cryptography verifies.

" + "VerificationKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of the PIN verification key.

" }, "EncryptionKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", "documentation":"

The keyARN of the encryption key under which the PIN block data is encrypted. This key type can be PEK or BDK.

" }, + "VerificationAttributes":{ + "shape":"PinVerificationAttributes", + "documentation":"

The attributes and values for PIN data verification.

" + }, + "EncryptedPinBlock":{ + "shape":"EncryptedPinBlockType", + "documentation":"

The encrypted PIN block data that Amazon Web Services Payment Cryptography verifies.

" + }, + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

" + }, "PinBlockFormat":{ "shape":"PinBlockFormatForPinData", "documentation":"

The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0 and ISO_Format_3.

The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

" @@ -2082,42 +2176,34 @@ "shape":"IntegerRangeBetween4And12", "documentation":"

The length of PIN being verified.

" }, - "PrimaryAccountNumber":{ - "shape":"NumberLengthBetween12And19", - "documentation":"

The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

" - }, - "VerificationAttributes":{ - "shape":"PinVerificationAttributes", - "documentation":"

The attributes and values for PIN data verification.

" - }, - "VerificationKeyIdentifier":{ - "shape":"KeyArnOrKeyAliasType", - "documentation":"

The keyARN of the PIN verification key.

" + "DukptAttributes":{ + "shape":"DukptAttributes", + "documentation":"

The attributes and values for the DUKPT encrypted PIN block data.

" } } }, "VerifyPinDataOutput":{ "type":"structure", "required":[ - "EncryptionKeyArn", - "EncryptionKeyCheckValue", "VerificationKeyArn", - "VerificationKeyCheckValue" + "VerificationKeyCheckValue", + "EncryptionKeyArn", + "EncryptionKeyCheckValue" ], "members":{ - "EncryptionKeyArn":{ + "VerificationKeyArn":{ "shape":"KeyArn", - "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

" + "documentation":"

The keyARN of the PIN encryption key that Amazon Web Services Payment Cryptography uses for PIN or PIN Offset verification.

" }, - "EncryptionKeyCheckValue":{ + "VerificationKeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" }, - "VerificationKeyArn":{ + "EncryptionKeyArn":{ "shape":"KeyArn", - "documentation":"

The keyARN of the PIN encryption key that Amazon Web Services Payment Cryptography uses for PIN or PIN Offset verification.

" + "documentation":"

The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

" }, - "VerificationKeyCheckValue":{ + "EncryptionKeyCheckValue":{ "shape":"KeyCheckValue", "documentation":"

The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed.

Amazon Web Services Payment Cryptography computes the KCV according to the CMAC specification.

" } @@ -2128,7 +2214,7 @@ "required":["PinVerificationKeyIndex"], "members":{ "PinVerificationKeyIndex":{ - "shape":"IntegerRangeBetween0And9", + "shape":"IntegerRangeBetween0And6", "documentation":"

The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

" } }, @@ -2142,11 +2228,11 @@ ], "members":{ "PinVerificationKeyIndex":{ - "shape":"IntegerRangeBetween0And9", + "shape":"IntegerRangeBetween0And6", "documentation":"

The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

" }, "VerificationValue":{ - "shape":"NumberLengthBetween4And12", + "shape":"VerificationValueType", "documentation":"

Parameters that are required to generate or verify Visa PVV (PIN Verification Value).

" } }, @@ -2160,15 +2246,41 @@ ], "members":{ "EncryptedPinBlock":{ - "shape":"HexLengthBetween16And32", + "shape":"EncryptedPinBlockType", "documentation":"

The encrypted PIN block data to verify.

" }, "PinVerificationKeyIndex":{ - "shape":"IntegerRangeBetween0And9", + "shape":"IntegerRangeBetween0And6", "documentation":"

The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

" } }, "documentation":"

Parameters that are required to generate or verify Visa PVV (PIN Verification Value).

" + }, + "WrappedKey":{ + "type":"structure", + "required":["WrappedKeyMaterial"], + "members":{ + "WrappedKeyMaterial":{ + "shape":"WrappedKeyMaterial", + "documentation":"

Parameter information of a WrappedKeyBlock for encryption key exchange.

" + }, + "KeyCheckValueAlgorithm":{ + "shape":"KeyCheckValueAlgorithm", + "documentation":"

The algorithm that Amazon Web Services Payment Cryptography uses to calculate the key check value (KCV). It is used to validate the key integrity.

For TDES keys, the KCV is computed by encrypting 8 bytes, each with value of zero, with the key to be checked and retaining the 3 highest order bytes of the encrypted result. For AES keys, the KCV is computed using a CMAC algorithm where the input data is 16 bytes of zero and retaining the 3 highest order bytes of the encrypted result.

" + } + }, + "documentation":"

Parameter information of a WrappedKeyBlock for encryption key exchange.

" + }, + "WrappedKeyMaterial":{ + "type":"structure", + "members":{ + "Tr31KeyBlock":{ + "shape":"Tr31WrappedKeyBlock", + "documentation":"

The TR-31 wrapped key block.

" + } + }, + "documentation":"

Parameter information of a WrappedKeyBlock for encryption key exchange.

", + "union":true } }, "documentation":"

You use the Amazon Web Services Payment Cryptography Data Plane to manage how encryption keys are used for payment-related transaction processing and associated cryptographic operations. You can encrypt, decrypt, generate, verify, and translate payment-related cryptographic operations in Amazon Web Services Payment Cryptography. For more information, see Data operations in the Amazon Web Services Payment Cryptography User Guide.

To manage your encryption keys, you use the Amazon Web Services Payment Cryptography Control Plane. You can create, import, export, share, manage, and delete keys. You can also manage Identity and Access Management (IAM) policies for keys.

" diff --git a/botocore/data/payment-cryptography-data/2022-02-03/waiters-2.json b/botocore/data/payment-cryptography-data/2022-02-03/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/payment-cryptography-data/2022-02-03/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/payment-cryptography/2021-09-14/service-2.json b/botocore/data/payment-cryptography/2021-09-14/service-2.json index dd3cd6abdc..fa667a29b9 100644 --- a/botocore/data/payment-cryptography/2021-09-14/service-2.json +++ b/botocore/data/payment-cryptography/2021-09-14/service-2.json @@ -2,9 +2,11 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-09-14", + "auth":["aws.auth#sigv4"], "endpointPrefix":"controlplane.payment-cryptography", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceFullName":"Payment Cryptography Control Plane", "serviceId":"Payment Cryptography", "signatureVersion":"v4", @@ -561,6 +563,12 @@ } } }, + "EvenHexLengthBetween16And32":{ + "type":"string", + "max":32, + "min":16, + "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+" + }, "ExportAttributes":{ "type":"structure", "members":{ @@ -702,7 +710,7 @@ "documentation":"

The format of key block that Amazon Web Services Payment Cryptography will use during key export.

" }, "RandomNonce":{ - "shape":"HexLength16", + "shape":"EvenHexLengthBetween16And32", "documentation":"

A random number value that is unique to the TR-34 key block generated using 2 pass. The operation will fail, if a random nonce value is not provided for a TR-34 key block generated using 2 pass.

" }, "KeyBlockHeaders":{ @@ -877,12 +885,6 @@ } } }, - "HexLength16":{ - "type":"string", - "max":16, - "min":16, - "pattern":"[0-9A-F]+" - }, "HexLength20Or24":{ "type":"string", "max":24, @@ -1030,7 +1032,7 @@ "documentation":"

The key block format to use during key import. The only value allowed is X9_TR34_2012.

" }, "RandomNonce":{ - "shape":"HexLength16", + "shape":"EvenHexLengthBetween16And32", "documentation":"

A random number value that is unique to the TR-34 key block generated using 2 pass. The operation will fail, if a random nonce value is not provided for a TR-34 key block generated using 2 pass.

" } }, @@ -1674,7 +1676,8 @@ "type":"string", "max":9984, "min":56, - "pattern":"[0-9A-Z]+" + "pattern":"[0-9A-Z]+", + "sensitive":true }, "Tr34KeyBlockFormat":{ "type":"string", diff --git a/botocore/data/pca-connector-scep/2018-05-10/endpoint-rule-set-1.json b/botocore/data/pca-connector-scep/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..bf010937df --- /dev/null +++ b/botocore/data/pca-connector-scep/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://pca-connector-scep-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-scep-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-scep.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://pca-connector-scep.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/pca-connector-scep/2018-05-10/paginators-1.json b/botocore/data/pca-connector-scep/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..7a913db529 --- /dev/null +++ b/botocore/data/pca-connector-scep/2018-05-10/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListChallengeMetadata": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Challenges" + }, + "ListConnectors": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Connectors" + } + } +} diff --git a/botocore/data/pca-connector-scep/2018-05-10/service-2.json b/botocore/data/pca-connector-scep/2018-05-10/service-2.json new file mode 100644 index 0000000000..5939610584 --- /dev/null +++ b/botocore/data/pca-connector-scep/2018-05-10/service-2.json @@ -0,0 +1,981 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"pca-connector-scep", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"Private CA Connector for SCEP", + "serviceId":"Pca Connector Scep", + "signatureVersion":"v4", + "signingName":"pca-connector-scep", + "uid":"pca-connector-scep-2018-05-10" + }, + "operations":{ + "CreateChallenge":{ + "name":"CreateChallenge", + "http":{ + "method":"POST", + "requestUri":"/challenges", + "responseCode":202 + }, + "input":{"shape":"CreateChallengeRequest"}, + "output":{"shape":"CreateChallengeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"BadRequestException"}, + 
{"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

For general-purpose connectors. Creates a challenge password for the specified connector. The SCEP protocol uses a challenge password to authenticate a request before issuing a certificate from a certificate authority (CA). Your SCEP clients include the challenge password as part of their certificate request to Connector for SCEP. To retrieve the connector Amazon Resource Names (ARNs) for the connectors in your account, call ListConnectors.

To create additional challenge passwords for the connector, call CreateChallenge again. We recommend frequently rotating your challenge passwords.

" + }, + "CreateConnector":{ + "name":"CreateConnector", + "http":{ + "method":"POST", + "requestUri":"/connectors", + "responseCode":202 + }, + "input":{"shape":"CreateConnectorRequest"}, + "output":{"shape":"CreateConnectorResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a SCEP connector. A SCEP connector links Amazon Web Services Private Certificate Authority to your SCEP-compatible devices and mobile device management (MDM) systems. Before you create a connector, you must complete a set of prerequisites, including creation of a private certificate authority (CA) to use with this connector. For more information, see Connector for SCEP prerequisites.

" + }, + "DeleteChallenge":{ + "name":"DeleteChallenge", + "http":{ + "method":"DELETE", + "requestUri":"/challenges/{ChallengeArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteChallengeRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes the specified Challenge.

", + "idempotent":true + }, + "DeleteConnector":{ + "name":"DeleteConnector", + "http":{ + "method":"DELETE", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteConnectorRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes the specified Connector. This operation also deletes any challenges associated with the connector.

", + "idempotent":true + }, + "GetChallengeMetadata":{ + "name":"GetChallengeMetadata", + "http":{ + "method":"GET", + "requestUri":"/challengeMetadata/{ChallengeArn}", + "responseCode":200 + }, + "input":{"shape":"GetChallengeMetadataRequest"}, + "output":{"shape":"GetChallengeMetadataResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the metadata for the specified Challenge.

" + }, + "GetChallengePassword":{ + "name":"GetChallengePassword", + "http":{ + "method":"GET", + "requestUri":"/challengePasswords/{ChallengeArn}", + "responseCode":200 + }, + "input":{"shape":"GetChallengePasswordRequest"}, + "output":{"shape":"GetChallengePasswordResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the challenge password for the specified Challenge.

" + }, + "GetConnector":{ + "name":"GetConnector", + "http":{ + "method":"GET", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":200 + }, + "input":{"shape":"GetConnectorRequest"}, + "output":{"shape":"GetConnectorResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves details about the specified Connector. Calling this action returns important details about the connector, such as the public SCEP URL where your clients can request certificates.

" + }, + "ListChallengeMetadata":{ + "name":"ListChallengeMetadata", + "http":{ + "method":"GET", + "requestUri":"/challengeMetadata", + "responseCode":200 + }, + "input":{"shape":"ListChallengeMetadataRequest"}, + "output":{"shape":"ListChallengeMetadataResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the challenge metadata for the specified ARN.

" + }, + "ListConnectors":{ + "name":"ListConnectors", + "http":{ + "method":"GET", + "requestUri":"/connectors", + "responseCode":200 + }, + "input":{"shape":"ListConnectorsRequest"}, + "output":{"shape":"ListConnectorsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists the connectors belonging to your Amazon Web Services account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the tags associated with the specified resource. Tags are key-value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Adds one or more tags to your resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Removes one or more tags from your resource.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You can receive this error if you attempt to perform an operation and you don't have the required permissions. This can be caused by insufficient permissions in policies attached to your Amazon Web Services Identity and Access Management (IAM) principal. It can also happen because of restrictions in place from an Amazon Web Services Organizations service control policy (SCP) that affects your Amazon Web Services account.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AzureApplicationId":{ + "type":"string", + "max":100, + "min":15, + "pattern":"[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}" + }, + "AzureDomain":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, + "BadRequestException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CertificateAuthorityArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"arn:aws(-[a-z]+)*:acm-pca:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:certificate-authority\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" + }, + "Challenge":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge.

" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was updated.

" + }, + "Password":{ + "shape":"SensitiveString", + "documentation":"

The SCEP challenge password, in UUID format.

" + } + }, + "documentation":"

For Connector for SCEP for general-purpose. An object containing information about the specified connector's SCEP challenge passwords.

" + }, + "ChallengeArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/challenge\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" + }, + "ChallengeMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge.

" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the connector was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the connector was updated.

" + } + }, + "documentation":"

Contains details about the connector's challenge.

" + }, + "ChallengeMetadataList":{ + "type":"list", + "member":{"shape":"ChallengeMetadataSummary"} + }, + "ChallengeMetadataSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge.

" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was updated.

" + } + }, + "documentation":"

Details about the specified challenge, returned by the GetChallengeMetadata action.

" + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[!-~]+" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

The identifier of the Amazon Web Services resource.

" + }, + "ResourceType":{ + "shape":"String", + "documentation":"

The resource type, which can be either Connector or Challenge.

" + } + }, + "documentation":"

This request can't be completed for one of the following reasons because the requested resource was being concurrently modified by another request.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "Connector":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

" + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

The Amazon Resource Name (ARN) of the certificate authority associated with the connector.

" + }, + "Type":{ + "shape":"ConnectorType", + "documentation":"

The connector type.

" + }, + "MobileDeviceManagement":{ + "shape":"MobileDeviceManagement", + "documentation":"

Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty.

" + }, + "OpenIdConfiguration":{ + "shape":"OpenIdConfiguration", + "documentation":"

Contains OpenID Connect (OIDC) parameters for use with Connector for SCEP for Microsoft Intune. For more information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"

The connector's status.

" + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"

Information about why connector creation failed, if status is FAILED.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The connector's HTTPS public SCEP URL.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the connector was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the connector was updated.

" + } + }, + "documentation":"

Connector for SCEP is a service that links Amazon Web Services Private Certificate Authority to your SCEP-enabled devices. The connector brokers the exchange of certificates from Amazon Web Services Private CA to your SCEP-enabled devices and mobile device management systems. The connector is a complex type that contains the connector's configuration settings.

" + }, + "ConnectorArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" + }, + "ConnectorList":{ + "type":"list", + "member":{"shape":"ConnectorSummary"} + }, + "ConnectorStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "ConnectorStatusReason":{ + "type":"string", + "enum":[ + "INTERNAL_FAILURE", + "PRIVATECA_ACCESS_DENIED", + "PRIVATECA_INVALID_STATE", + "PRIVATECA_RESOURCE_NOT_FOUND" + ] + }, + "ConnectorSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

" + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector's associated certificate authority.

" + }, + "Type":{ + "shape":"ConnectorType", + "documentation":"

The connector type.

" + }, + "MobileDeviceManagement":{ + "shape":"MobileDeviceManagement", + "documentation":"

Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty.

" + }, + "OpenIdConfiguration":{ + "shape":"OpenIdConfiguration", + "documentation":"

Contains OpenID Connect (OIDC) parameters for use with Microsoft Intune.

" + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"

The connector's status. Status can be creating, active, deleting, or failed.

" + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"

Information about why connector creation failed, if status is FAILED.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The connector's HTTPS public SCEP URL.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the challenge was updated.

" + } + }, + "documentation":"

Lists the Amazon Web Services Private CA SCEP connectors belonging to your Amazon Web Services account.

" + }, + "ConnectorType":{ + "type":"string", + "enum":[ + "GENERAL_PURPOSE", + "INTUNE" + ] + }, + "CreateChallengeRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector that you want to create a challenge for.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Custom string that can be used to distinguish between calls to the CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. Therefore, if you call CreateChallenge multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one challenge and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple challenge passwords.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with the resource.

" + } + } + }, + "CreateChallengeResponse":{ + "type":"structure", + "members":{ + "Challenge":{ + "shape":"Challenge", + "documentation":"

Returns the challenge details for the specified connector.

" + } + } + }, + "CreateConnectorRequest":{ + "type":"structure", + "required":["CertificateAuthorityArn"], + "members":{ + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Private Certificate Authority certificate authority to use with this connector. Due to security vulnerabilities present in the SCEP protocol, we recommend using a private CA that's dedicated for use with the connector.

To retrieve the private CAs associated with your account, you can call ListCertificateAuthorities using the Amazon Web Services Private CA API.

" + }, + "MobileDeviceManagement":{ + "shape":"MobileDeviceManagement", + "documentation":"

If you don't supply a value, by default Connector for SCEP creates a connector for general-purpose use. A general-purpose connector is designed to work with clients or endpoints that support the SCEP protocol, except Connector for SCEP for Microsoft Intune. With connectors for general-purpose use, you manage SCEP challenge passwords using Connector for SCEP. For information about considerations and limitations with using Connector for SCEP, see Considerations and Limitations.

If you provide an IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, and you manage the challenge passwords using Microsoft Intune. For more information, see Using Connector for SCEP for Microsoft Intune.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Custom string that can be used to distinguish between calls to the CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. Therefore, if you call CreateChallenge multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one challenge and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple challenge passwords.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with the resource.

" + } + } + }, + "CreateConnectorResponse":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

Returns the Amazon Resource Name (ARN) of the connector.

" + } + } + }, + "DeleteChallengeRequest":{ + "type":"structure", + "required":["ChallengeArn"], + "members":{ + "ChallengeArn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge password to delete.

", + "location":"uri", + "locationName":"ChallengeArn" + } + } + }, + "DeleteConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector to delete.

", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "GetChallengeMetadataRequest":{ + "type":"structure", + "required":["ChallengeArn"], + "members":{ + "ChallengeArn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge.

", + "location":"uri", + "locationName":"ChallengeArn" + } + } + }, + "GetChallengeMetadataResponse":{ + "type":"structure", + "members":{ + "ChallengeMetadata":{ + "shape":"ChallengeMetadata", + "documentation":"

The metadata for the challenge.

" + } + } + }, + "GetChallengePasswordRequest":{ + "type":"structure", + "required":["ChallengeArn"], + "members":{ + "ChallengeArn":{ + "shape":"ChallengeArn", + "documentation":"

The Amazon Resource Name (ARN) of the challenge.

", + "location":"uri", + "locationName":"ChallengeArn" + } + } + }, + "GetChallengePasswordResponse":{ + "type":"structure", + "members":{ + "Password":{ + "shape":"SensitiveString", + "documentation":"

The SCEP challenge password.

" + } + } + }, + "GetConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "GetConnectorResponse":{ + "type":"structure", + "members":{ + "Connector":{ + "shape":"Connector", + "documentation":"

The properties of the connector.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request processing has failed because of an unknown error, exception or failure with an internal server.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "IntuneConfiguration":{ + "type":"structure", + "required":[ + "AzureApplicationId", + "Domain" + ], + "members":{ + "AzureApplicationId":{ + "shape":"AzureApplicationId", + "documentation":"

The directory (tenant) ID from your Microsoft Entra ID app registration.

" + }, + "Domain":{ + "shape":"AzureDomain", + "documentation":"

The primary domain from your Microsoft Entra ID app registration.

" + } + }, + "documentation":"

Contains configuration details for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

When you use Connector for SCEP for Microsoft Intune, certain functionalities are enabled by accessing Microsoft Intune through the Microsoft API. Your use of the Connector for SCEP and accompanying Amazon Web Services services doesn't remove your need to have a valid license for your use of the Microsoft Intune service. You should also review the Microsoft Intune® App Protection Policies.

" + }, + "ListChallengeMetadataRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of objects that you want Connector for SCEP to return for this request. If more objects are available, in the response, Connector for SCEP provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

", + "location":"querystring", + "locationName":"NextToken" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

The Amazon Resource Name (ARN) of the connector.

", + "location":"querystring", + "locationName":"ConnectorArn" + } + } + }, + "ListChallengeMetadataResponse":{ + "type":"structure", + "members":{ + "Challenges":{ + "shape":"ChallengeMetadataList", + "documentation":"

The challenge metadata for the challenges belonging to your Amazon Web Services account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + } + } + }, + "ListConnectorsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of objects that you want Connector for SCEP to return for this request. If more objects are available, in the response, Connector for SCEP provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListConnectorsResponse":{ + "type":"structure", + "members":{ + "Connectors":{ + "shape":"ConnectorList", + "documentation":"

The connectors belonging to your Amazon Web Services account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with the resource.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "MobileDeviceManagement":{ + "type":"structure", + "members":{ + "Intune":{ + "shape":"IntuneConfiguration", + "documentation":"

Configuration settings for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + } + }, + "documentation":"

If you don't supply a value, by default Connector for SCEP creates a connector for general-purpose use. A general-purpose connector is designed to work with clients or endpoints that support the SCEP protocol, except Connector for SCEP for Microsoft Intune. For information about considerations and limitations with using Connector for SCEP, see Considerations and Limitations.

If you provide an IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, and you manage the challenge passwords using Microsoft Intune. For more information, see Using Connector for SCEP for Microsoft Intune.

", + "union":true + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?" + }, + "OpenIdConfiguration":{ + "type":"structure", + "members":{ + "Issuer":{ + "shape":"String", + "documentation":"

The issuer value to copy into your Microsoft Entra app registration's OIDC.

" + }, + "Subject":{ + "shape":"String", + "documentation":"

The subject value to copy into your Microsoft Entra app registration's OIDC.

" + }, + "Audience":{ + "shape":"String", + "documentation":"

The audience value to copy into your Microsoft Entra app registration's OIDC.

" + } + }, + "documentation":"

Contains OpenID Connect (OIDC) parameters for use with Microsoft Intune. For more information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

The identifier of the Amazon Web Services resource.

" + }, + "ResourceType":{ + "shape":"String", + "documentation":"

The resource type, which can be either Connector or Challenge.

" + } + }, + "documentation":"

The operation tried to access a nonexistent resource. The resource might be incorrectly specified, or it might have a status other than ACTIVE.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "ResourceType", + "ServiceCode", + "QuotaCode" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{ + "shape":"String", + "documentation":"

The resource type, which can be either Connector or Challenge.

" + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

Identifies the originating service.

" + }, + "QuotaCode":{ + "shape":"String", + "documentation":"

The quota identifier.

" + } + }, + "documentation":"

The request would cause a service quota to be exceeded.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with the resource.

" + } + } + }, + "Tags":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The limit on the number of requests per second was exceeded.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

Specifies a list of tag keys that you want to remove from the specified resources.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

The reason for the validation error, if available. The service doesn't return a reason for every validation exception.

" + } + }, + "documentation":"

An input validation error occurred. For example, invalid characters in a name tag, or an invalid pagination token.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "CA_CERT_VALIDITY_TOO_SHORT", + "INVALID_CA_USAGE_MODE", + "INVALID_CONNECTOR_TYPE", + "INVALID_STATE", + "NO_CLIENT_TOKEN", + "UNKNOWN_OPERATION", + "OTHER" + ] + } + }, + "documentation":"

Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change.

Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide.

" +} diff --git a/botocore/data/pca-connector-scep/2018-05-10/waiters-2.json b/botocore/data/pca-connector-scep/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/pca-connector-scep/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/pi/2018-02-27/service-2.json b/botocore/data/pi/2018-02-27/service-2.json index 5d997fdb28..1b1ff81e33 100644 --- a/botocore/data/pi/2018-02-27/service-2.json +++ b/botocore/data/pi/2018-02-27/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"pi", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"AWS PI", "serviceFullName":"AWS Performance Insights", "serviceId":"PI", "signatureVersion":"v4", "signingName":"pi", "targetPrefix":"PerformanceInsightsv20180227", - "uid":"pi-2018-02-27" + "uid":"pi-2018-02-27", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreatePerformanceAnalysisReport":{ @@ -217,7 +219,7 @@ }, "AdditionalMetricsList":{ "type":"list", - "member":{"shape":"RequestString"}, + "member":{"shape":"SanitizedString"}, "max":30, "min":1 }, @@ -319,6 +321,12 @@ "FAILED" ] }, + "AuthorizedActionsList":{ + "type":"list", + "member":{"shape":"FineGrainedAction"}, + "max":3, + "min":0 + }, "Boolean":{"type":"boolean"}, "ContextType":{ "type":"string", @@ -479,7 +487,7 @@ }, "Filter":{ "shape":"MetricQueryFilterMap", - "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters.

  • A single filter for any other dimension in this dimension group.

" + "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters.

  • A single filter for any other dimension in this dimension group.

The db.sql.db_id filter isn't available for RDS for SQL Server DB instances.

" }, "MaxResults":{ "shape":"MaxResults", @@ -559,12 +567,12 @@ "required":["Group"], "members":{ "Group":{ - "shape":"RequestString", + "shape":"SanitizedString", "documentation":"

The name of the dimension group. Valid values are as follows:

  • db - The name of the database to which the client is connected. The following values are permitted:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Aurora MySQL

    • Amazon RDS MySQL

    • Amazon RDS MariaDB

    • Amazon DocumentDB

  • db.application - The name of the application that is connected to the database. The following values are permitted:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Amazon DocumentDB

  • db.host - The host name of the connected client (all engines).

  • db.query - The query that is currently running (only Amazon DocumentDB).

  • db.query_tokenized - The digest query (only Amazon DocumentDB).

  • db.session_type - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL).

  • db.sql - The text of the SQL statement that is currently running (all engines except Amazon DocumentDB).

  • db.sql_tokenized - The SQL digest (all engines except Amazon DocumentDB).

  • db.user - The user logged in to the database (all engines except Amazon DocumentDB).

  • db.wait_event - The event for which the database backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_event_type - The type of event for which the database backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_state - The event for which the database backend is waiting (only Amazon DocumentDB).

" }, "Dimensions":{ - "shape":"RequestStringList", - "documentation":"

A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.

Valid values for elements in the Dimensions array are:

  • db.application.name - The name of the application that is connected to the database. Valid values are as follows:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Amazon DocumentDB

  • db.host.id - The host ID of the connected client (all engines).

  • db.host.name - The host name of the connected client (all engines).

  • db.name - The name of the database to which the client is connected. Valid values are as follows:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Aurora MySQL

    • Amazon RDS MySQL

    • Amazon RDS MariaDB

    • Amazon DocumentDB

  • db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.db_id - The query ID generated by the database (only Amazon DocumentDB).

  • db.query.statement - The text of the query that is being run (only Amazon DocumentDB).

  • db.query.tokenized_id

  • db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB).

  • db.session_type.name - The type of the current session (only Amazon DocumentDB).

  • db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).

  • db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with pi- (all engines except Amazon DocumentDB).

  • db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees (all engines except Amazon DocumentDB)

  • db.sql.tokenized_id

  • db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console, db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot database issues.

  • db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).

  • db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id = ? (all engines except Amazon DocumentDB)

  • db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB).

  • db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB).

  • db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB).

" + "shape":"SanitizedStringList", + "documentation":"

A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.

Valid values for elements in the Dimensions array are:

  • db.application.name - The name of the application that is connected to the database. Valid values are as follows:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Amazon DocumentDB

  • db.host.id - The host ID of the connected client (all engines).

  • db.host.name - The host name of the connected client (all engines).

  • db.name - The name of the database to which the client is connected. Valid values are as follows:

    • Aurora PostgreSQL

    • Amazon RDS PostgreSQL

    • Aurora MySQL

    • Amazon RDS MySQL

    • Amazon RDS MariaDB

    • Amazon DocumentDB

  • db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.db_id - The query ID generated by the database (only Amazon DocumentDB).

  • db.query.statement - The text of the query that is being run (only Amazon DocumentDB).

  • db.query.tokenized_id

  • db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).

  • db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB).

  • db.session_type.name - The type of the current session (only Amazon DocumentDB).

  • db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).

  • db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with pi- (all engines except Amazon DocumentDB).

  • db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees (all engines except Amazon DocumentDB)

  • db.sql.tokenized_id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). The db.sql.tokenized_id dimension fetches the value of the db.sql_tokenized.id dimension. Amazon RDS returns db.sql.tokenized_id from the db.sql dimension group.

  • db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console, db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot database issues.

  • db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).

  • db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id = ? (all engines except Amazon DocumentDB)

  • db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB).

  • db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB).

  • db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except Amazon DocumentDB).

  • db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB).

" }, "Limit":{ "shape":"Limit", @@ -646,7 +654,7 @@ }, "DimensionsMetricList":{ "type":"list", - "member":{"shape":"RequestString"}, + "member":{"shape":"SanitizedString"}, "max":5, "min":1 }, @@ -678,6 +686,14 @@ "UNKNOWN" ] }, + "FineGrainedAction":{ + "type":"string", + "enum":[ + "DescribeDimensionKeys", + "GetDimensionKeyDetails", + "GetResourceMetrics" + ] + }, "GetDimensionKeyDetailsRequest":{ "type":"structure", "required":[ @@ -972,6 +988,10 @@ "NextToken":{ "shape":"NextToken", "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords.

" + }, + "AuthorizedActions":{ + "shape":"AuthorizedActionsList", + "documentation":"

The actions to discover the dimensions you are authorized to access. If you specify multiple actions, then the response will contain the dimensions common for all the actions.

When you don't specify this request parameter or provide an empty list, the response contains all the available dimensions for the target database engine whether or not you are authorized to access them.

" } } }, @@ -1152,7 +1172,7 @@ "required":["Metric"], "members":{ "Metric":{ - "shape":"RequestString", + "shape":"SanitizedString", "documentation":"

The name of a Performance Insights metric to be measured.

Valid values for Metric are:

If the number of active sessions is less than an internal Performance Insights threshold, db.load.avg and db.sampledload.avg are the same value. If the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with db.load.avg showing the scaled values, db.sampledload.avg showing the raw values, and db.sampledload.avg less than db.load.avg. For most use cases, you can query db.load.avg only.

" }, "GroupBy":{ @@ -1161,14 +1181,14 @@ }, "Filter":{ "shape":"MetricQueryFilterMap", - "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy parameter.

  • A single filter for any other dimension in this dimension group.

" + "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy parameter.

  • A single filter for any other dimension in this dimension group.

The db.sql.db_id filter isn't available for RDS for SQL Server DB instances.

" } }, "documentation":"

A single query to be processed. You must provide the metric to query and append an aggregate function to the metric. For example, to find the average for the metric db.load you must use db.load.avg. Valid values for aggregate functions include .avg, .min, .max, and .sum. If no other parameters are specified, Performance Insights returns all data points for the specified metric. Optionally, you can request that the data points be aggregated by dimension group (GroupBy), and return only those data points that match your criteria (Filter).

" }, "MetricQueryFilterMap":{ "type":"map", - "key":{"shape":"RequestString"}, + "key":{"shape":"SanitizedString"}, "value":{"shape":"RequestString"} }, "MetricQueryList":{ @@ -1179,7 +1199,7 @@ }, "MetricTypeList":{ "type":"list", - "member":{"shape":"RequestString"} + "member":{"shape":"SanitizedString"} }, "MetricValuesList":{ "type":"list", @@ -1252,15 +1272,9 @@ "min":0, "pattern":".*\\S.*" }, - "RequestStringList":{ - "type":"list", - "member":{"shape":"RequestString"}, - "max":10, - "min":1 - }, "RequestedDimensionList":{ "type":"list", - "member":{"shape":"RequestString"}, + "member":{"shape":"SanitizedString"}, "max":10, "min":1 }, @@ -1316,6 +1330,19 @@ "type":"list", "member":{"shape":"ResponseResourceMetric"} }, + "SanitizedString":{ + "type":"string", + "documentation":"A generic string type that forbids characters that could expose our service (or services downstream) to security risks around injections.", + "max":256, + "min":0, + "pattern":"^[a-zA-Z0-9-_\\.:/*)( ]+$" + }, + "SanitizedStringList":{ + "type":"list", + "member":{"shape":"SanitizedString"}, + "max":10, + "min":1 + }, "ServiceType":{ "type":"string", "enum":[ diff --git a/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json b/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json index 1d37990f92..9701c2bc27 100644 --- a/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json +++ b/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json @@ -2,9 +2,11 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-03-31", + "auth":["aws.auth#sigv4"], "endpointPrefix":"sms-voice", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Pinpoint SMS Voice V2", "serviceId":"Pinpoint SMS Voice V2", "signatureVersion":"v4", @@ -85,7 +87,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new event destination in a configuration set.

An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic.

Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Kinesis Data Firehose destination.

" + "documentation":"

Creates a new event destination in a configuration set.

An event destination is a location where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic.

Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a single destination, such as a CloudWatch or Firehose destination.

" }, "CreateOptOutList":{ "name":"CreateOptOutList", @@ -103,7 +105,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new opt-out list.

If the opt-out list name already exists, an error is returned.

An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the Amazon Pinpoint User Guide.

" + "documentation":"

Creates a new opt-out list.

If the opt-out list name already exists, an error is returned.

An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the AWS End User Messaging SMS User Guide.

" }, "CreatePool":{ "name":"CreatePool", @@ -334,7 +336,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an existing keyword from an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message.

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" + "documentation":"

Deletes an existing keyword from an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message.

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" }, "DeleteMediaMessageSpendLimitOverride":{ "name":"DeleteMediaMessageSpendLimitOverride", @@ -492,7 +494,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide.

" + "documentation":"

Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide.

" }, "DeleteVerifiedDestinationNumber":{ "name":"DeleteVerifiedDestinationNumber", @@ -526,7 +528,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide.

" + "documentation":"

Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -542,7 +544,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request.

New Amazon Pinpoint accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse.

" + "documentation":"

Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon Web Services Support case for a service limit increase request.

New accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients and SMS or voice recipients from fraud and abuse.

" }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -558,7 +560,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Amazon Pinpoint quotas in the Amazon Pinpoint Developer Guide.

" + "documentation":"

Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Quotas in the AWS End User Messaging SMS User Guide.

" }, "DescribeConfigurationSets":{ "name":"DescribeConfigurationSets", @@ -592,7 +594,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the specified keywords or all keywords on your origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" + "documentation":"

Describes the specified keywords or all keywords on your origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" }, "DescribeOptOutLists":{ "name":"DescribeOptOutLists", @@ -826,7 +828,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the current Amazon Pinpoint monthly spend limits for sending voice and text messages.

When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS spending quota for Amazon Pinpoint in the Amazon Pinpoint User Guide.

" + "documentation":"

Describes the current monthly spend limits for sending voice and text messages.

When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging SMS User Guide.

" }, "DescribeVerifiedDestinationNumbers":{ "name":"DescribeVerifiedDestinationNumbers", @@ -984,7 +986,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates or updates a keyword configuration on an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" + "documentation":"

Creates or updates a keyword configuration on an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" }, "PutOptedOutNumber":{ "name":"PutOptedOutNumber", @@ -1074,7 +1076,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Request an origination phone number for use in your account. For more information on phone number request see Requesting a number in the Amazon Pinpoint User Guide.

" + "documentation":"

Request an origination phone number for use in your account. For more information on phone number request see Request a phone number in the AWS End User Messaging SMS User Guide.

" }, "RequestSenderId":{ "name":"RequestSenderId", @@ -1149,7 +1151,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new text message and sends it to a recipient's phone number.

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information, see Message Parts per Second (MPS) limits in the Amazon Pinpoint User Guide.

" + "documentation":"

Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked.

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information about MPS, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide.

" }, "SendVoiceMessage":{ "name":"SendVoiceMessage", @@ -1168,7 +1170,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Allows you to send a request that sends a voice message through Amazon Pinpoint. This operation uses Amazon Polly to convert a text script into a voice message.

" + "documentation":"

Allows you to send a request that sends a voice message. This operation uses Amazon Polly to convert a text script into a voice message.

" }, "SetAccountDefaultProtectConfiguration":{ "name":"SetAccountDefaultProtectConfiguration", @@ -1303,7 +1305,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, version 2 resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide.

" + "documentation":"

Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tags in the AWS End User Messaging SMS User Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -1320,7 +1322,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide.

" + "documentation":"

Removes the association of the specified tags from a resource. For more information on tags see Tags in the AWS End User Messaging SMS User Guide.

" }, "UpdateEventDestination":{ "name":"UpdateEventDestination", @@ -1338,7 +1340,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event destination.

You may want to update an event destination to change its matching event types or updating the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS.

" + "documentation":"

Updates an existing event destination in a configuration set. You can update the IAM role ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination.

You may want to update an event destination to change its matching event types or updating the destination resource ARN. You can't change an event destination's type between CloudWatch Logs, Firehose, and Amazon SNS.

" }, "UpdatePhoneNumber":{ "name":"UpdatePhoneNumber", @@ -1922,7 +1924,7 @@ }, "MatchingEventTypes":{ "shape":"EventTypeList", - "documentation":"

An array of event types that determine which events to log. If \"ALL\" is used, then Amazon Pinpoint logs every event type.

The TEXT_SENT event type is not supported.

" + "documentation":"

An array of event types that determine which events to log. If \"ALL\" is used, then AWS End User Messaging SMS and Voice logs every event type.

The TEXT_SENT event type is not supported.

" }, "CloudWatchLogsDestination":{ "shape":"CloudWatchLogsDestination", @@ -1930,7 +1932,7 @@ }, "KinesisFirehoseDestination":{ "shape":"KinesisFirehoseDestination", - "documentation":"

An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.

" + "documentation":"

An object that contains information about an event destination for logging to Amazon Data Firehose.

" }, "SnsDestination":{ "shape":"SnsDestination", @@ -2010,7 +2012,7 @@ "members":{ "OriginationIdentity":{ "shape":"PhoneOrSenderIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

" }, "IsoCountryCode":{ "shape":"IsoCountryCode", @@ -2018,7 +2020,7 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive.

" + "documentation":"

The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. After the pool is created the MessageType can't be changed.

" }, "DeletionProtectionEnabled":{ "shape":"Boolean", @@ -2068,7 +2070,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -2076,7 +2078,7 @@ }, "SharedRoutesEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

Indicates whether shared routes are enabled for the pool.

" + "documentation":"

Indicates whether shared routes are enabled for the pool. Set to false and only origination identities in this pool are used to send messages.

" }, "DeletionProtectionEnabled":{ "shape":"PrimitiveBoolean", @@ -2739,7 +2741,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -3933,14 +3935,14 @@ }, "KinesisFirehoseDestination":{ "shape":"KinesisFirehoseDestination", - "documentation":"

An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose.

" + "documentation":"

An object that contains information about an event destination for logging to Amazon Data Firehose.

" }, "SnsDestination":{ "shape":"SnsDestination", "documentation":"

An object that contains information about an event destination that sends logging events to Amazon SNS.

" } }, - "documentation":"

Contains information about an event destination.

Event destinations are associated with configuration sets, which enable you to publish message sending events to CloudWatch, Kinesis Data Firehose, or Amazon SNS.

" + "documentation":"

Contains information about an event destination.

Event destinations are associated with configuration sets, which enable you to publish message sending events to CloudWatch, Firehose, or Amazon SNS.

" }, "EventDestinationList":{ "type":"list", @@ -4085,7 +4087,7 @@ }, "CountryRuleSet":{ "shape":"ProtectConfigurationCountryRuleSet", - "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.

" + "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.

" } } }, @@ -4209,14 +4211,14 @@ "members":{ "IamRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of an Identity and Access Management role that is able to write event data to an Amazon Kinesis Data Firehose destination.

" + "documentation":"

The ARN of an Identity and Access Management role that is able to write event data to an Amazon Data Firehose destination.

" }, "DeliveryStreamArn":{ "shape":"DeliveryStreamArn", "documentation":"

The Amazon Resource Name (ARN) of the delivery stream.

" } }, - "documentation":"

Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Kinesis Data Firehose event destination.

Event destinations, such as Kinesis Data Firehose, are associated with configuration sets, which enable you to publish message sending events.

" + "documentation":"

Contains the delivery stream Amazon Resource Name (ARN), and the ARN of the Identity and Access Management (IAM) role associated with a Firehose event destination.

Event destinations, such as Firehose, are associated with configuration sets, which enable you to publish message sending events.

" }, "LanguageCode":{ "type":"string", @@ -4723,7 +4725,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to false an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out request. For more information see Self-managed opt-outs

" + "documentation":"

When set to false an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -4858,7 +4860,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to false, an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" + "documentation":"

When set to false, an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -4866,7 +4868,7 @@ }, "SharedRoutesEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

Allows you to enable shared routes on your pool.

By default, this is set to False. If you set this value to True, your messages are sent using phone numbers or sender IDs (depending on the country) that are shared with other Amazon Pinpoint users. In some countries, such as the United States, senders aren't allowed to use shared routes and must use a dedicated phone number or short code.

" + "documentation":"

Allows you to enable shared routes on your pool.

By default, this is set to False. If you set this value to True, your messages are sent using phone numbers or sender IDs (depending on the country) that are shared with other users. In some countries, such as the United States, senders aren't allowed to use shared routes and must use a dedicated phone number or short code.

" }, "DeletionProtectionEnabled":{ "shape":"PrimitiveBoolean", @@ -5958,7 +5960,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -6137,7 +6139,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -6498,11 +6500,11 @@ }, "MaxPrice":{ "shape":"MaxPrice", - "documentation":"

The maximum amount that you want to spend, in US dollars, per each text message part. A text message can contain multiple parts.

" + "documentation":"

The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice, the message is not sent and an error is returned.

" }, "TimeToLive":{ "shape":"TimeToLive", - "documentation":"

How long the text message is valid for. By default this is 72 hours.

" + "documentation":"

How long the text message is valid for, in seconds. By default this is 72 hours. If the message isn't handed off before the TTL expires, we stop attempting to hand off the message and return a TTL_EXPIRED event.

" }, "Context":{ "shape":"ContextMap", @@ -6510,11 +6512,11 @@ }, "DestinationCountryParameters":{ "shape":"DestinationCountryParameters", - "documentation":"

This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India.

" + "documentation":"

This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India.

  • IN_ENTITY_ID The entity ID or Principal Entity (PE) ID that you received after completing the sender ID registration process.

  • IN_TEMPLATE_ID The template ID that you received after completing the sender ID registration process.

    Make sure that the Template ID that you specify matches your message template exactly. If your message doesn't match the template that you provided during the registration process, the mobile carriers might reject your message.

" }, "DryRun":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to true, the message is checked and validated, but isn't sent to the end recipient.

" + "documentation":"

When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun.

The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit, then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the AWS End User Messaging SMS User Guide.

" }, "ProtectConfigurationId":{ "shape":"ProtectConfigurationIdOrArn", @@ -6615,7 +6617,7 @@ "documentation":"

The two-character code, in ISO 3166-1 alpha-2 format, for the country or region.

" } }, - "documentation":"

The alphanumeric sender ID in a specific country that you want to describe. For more information on sender IDs see Requesting sender IDs for SMS messaging with Amazon Pinpoint in the Amazon Pinpoint User Guide.

" + "documentation":"

The alphanumeric sender ID in a specific country that you want to describe. For more information on sender IDs see Requesting sender IDs in the AWS End User Messaging SMS User Guide.

" }, "SenderIdFilter":{ "type":"structure", @@ -6949,7 +6951,7 @@ "documentation":"

When set to True, the value that has been specified in the EnforcedLimit is used to determine the maximum amount in US dollars that can be spent to send messages each month.

" } }, - "documentation":"

Describes the current Amazon Pinpoint monthly spend limits for sending voice and text messages. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS spending quota for Amazon Pinpoint in the Amazon Pinpoint User Guide.

" + "documentation":"

Describes the current monthly spend limits for sending voice and text messages. For more information on increasing your monthly spend limit, see Requesting a spending quota increase in the AWS End User Messaging SMS User Guide.

" }, "SpendLimitList":{ "type":"list", @@ -7223,7 +7225,7 @@ }, "KinesisFirehoseDestination":{ "shape":"KinesisFirehoseDestination", - "documentation":"

An object that contains information about an event destination for logging to Kinesis Data Firehose.

" + "documentation":"

An object that contains information about an event destination for logging to Firehose.

" }, "SnsDestination":{ "shape":"SnsDestination", @@ -7270,7 +7272,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"Boolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListNameOrArn", @@ -7377,7 +7379,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"Boolean", - "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListNameOrArn", @@ -7426,7 +7428,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon Pinpoint automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -7464,7 +7466,7 @@ }, "CountryRuleSetUpdates":{ "shape":"ProtectConfigurationCountryRuleSet", - "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide.

" + "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide.

" } } }, @@ -7926,5 +7928,5 @@ "pattern":"[A-Za-z0-9_:/\\+-]+" } }, - "documentation":"

Welcome to the Amazon Pinpoint SMS and Voice, version 2 API Reference. This guide provides information about Amazon Pinpoint SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas.

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The Amazon Pinpoint SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. Amazon Pinpoint SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

If you're new to Amazon Pinpoint SMS, it's also helpful to review the Amazon Pinpoint SMS User Guide. The Amazon Pinpoint Developer Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Pinpoint SMS features programmatically and how to integrate Amazon Pinpoint functionality into mobile apps and other types of applications. The guide also provides key information, such as Amazon Pinpoint integration with other Amazon Web Services services, and the quotas that apply to use of the service.

Regional availability

The Amazon Pinpoint SMS and Voice, version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure.

" + "documentation":"

Welcome to the AWS End User Messaging SMS and Voice, version 2 API Reference. This guide provides information about AWS End User Messaging SMS and Voice, version 2 API resources, including supported HTTP methods, parameters, and schemas.

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The AWS End User Messaging SMS and Voice, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. AWS End User Messaging SMS and Voice, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

If you're new to AWS End User Messaging SMS and Voice, it's also helpful to review the AWS End User Messaging SMS User Guide. The AWS End User Messaging SMS User Guide provides tutorials, code samples, and procedures that demonstrate how to use AWS End User Messaging SMS and Voice features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as AWS End User Messaging SMS and Voice integration with other Amazon Web Services services, and the quotas that apply to use of the service.

Regional availability

The AWS End User Messaging SMS and Voice version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure.

" } diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 51061c10c3..883510339b 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -8,7 +8,10 @@ "protocol": "rest-json", "jsonVersion": "1.1", "uid": "pinpoint-2016-12-01", - "signatureVersion": "v4" + "signatureVersion": "v4", + "auth": [ + "aws.auth#sigv4" + ] }, "documentation": "

Doc Engage API - Amazon Pinpoint API

", "operations": { @@ -3896,7 +3899,9 @@ "shape": "ListTagsForResourceResponse", "documentation": "

The request succeeded.

" }, - "errors": [], + "errors": [ + + ], "documentation": "

Retrieves all the tags (keys and values) that are associated with an application, campaign, message template, or segment.

" }, "ListTemplateVersions": { @@ -4315,7 +4320,9 @@ "input": { "shape": "TagResourceRequest" }, - "errors": [], + "errors": [ + + ], "documentation": "

Adds one or more tags (keys and values) to an application, campaign, message template, or segment.

" }, "UntagResource": { @@ -4328,7 +4335,9 @@ "input": { "shape": "UntagResourceRequest" }, - "errors": [], + "errors": [ + + ], "documentation": "

Removes one or more tags (keys and values) from an application, campaign, message template, or segment.

" }, "UpdateAdmChannel": { @@ -6768,8 +6777,8 @@ "documentation": "

The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application.

" }, "Headers": { - "shape": "ListOfMessageHeader", - "documentation": "

The list of MessageHeaders for the email. You can have up to 15 MessageHeaders for each email.

" + "shape": "ListOfMessageHeader", + "documentation": "

The list of MessageHeaders for the email. You can have up to 15 MessageHeaders for each email.

" }, "HtmlBody": { "shape": "__string", @@ -8698,8 +8707,8 @@ "documentation": "

The subject line, or title, to use in email messages that are based on the message template.

" }, "Headers": { - "shape": "ListOfMessageHeader", - "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + "shape": "ListOfMessageHeader", + "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" }, "tags": { "shape": "MapOf__string", @@ -8749,8 +8758,8 @@ "documentation": "

The subject line, or title, that's used in email messages that are based on the message template.

" }, "Headers": { - "shape": "ListOfMessageHeader", - "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + "shape": "ListOfMessageHeader", + "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" }, "tags": { "shape": "MapOf__string", @@ -14389,31 +14398,31 @@ "documentation": "

The body of the email message, in plain text format. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.

" }, "Headers": { - "shape": "ListOfMessageHeader", - "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + "shape": "ListOfMessageHeader", + "documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" } }, "documentation": "

Specifies the contents of an email message, composed of a subject, a text part, and an HTML part.

" }, "ListOfMessageHeader": { - "type": "list", - "member": { - "shape": "MessageHeader" - } + "type": "list", + "member": { + "shape": "MessageHeader" + } }, "MessageHeader": { - "type": "structure", - "members": { - "Name": { - "shape": "__string", - "documentation": "

The name of the message header. The header name can contain up to 126 characters.

" - }, - "Value": { - "shape": "__string", - "documentation": "

The value of the message header. The header value can contain up to 870 characters, including the length of any rendered attributes. For example if you add the {CreationDate} attribute, it renders as YYYY-MM-DDTHH:MM:SS.SSSZ and is 24 characters in length.

" - } - }, - "documentation": "

Contains the name and value pair of an email header to add to your email. You can have up to 15 MessageHeaders. A header can contain information such as the sender, receiver, route, or timestamp.

" + "type": "structure", + "members": { + "Name": { + "shape": "__string", + "documentation": "

The name of the message header. The header name can contain up to 126 characters.

" + }, + "Value": { + "shape": "__string", + "documentation": "

The value of the message header. The header value can contain up to 870 characters, including the length of any rendered attributes. For example if you add the {CreationDate} attribute, it renders as YYYY-MM-DDTHH:MM:SS.SSSZ and is 24 characters in length.

" + } + }, + "documentation": "

Contains the name and value pair of an email header to add to your email. You can have up to 15 MessageHeaders. A header can contain information such as the sender, receiver, route, or timestamp.

" }, "SimpleEmailPart": { "type": "structure", @@ -16729,4 +16738,4 @@ } } } -} +} \ No newline at end of file diff --git a/botocore/data/pipes/2015-10-07/service-2.json b/botocore/data/pipes/2015-10-07/service-2.json index bee3797e61..a8b1337620 100644 --- a/botocore/data/pipes/2015-10-07/service-2.json +++ b/botocore/data/pipes/2015-10-07/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2015-10-07", "endpointPrefix":"pipes", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon EventBridge Pipes", "serviceId":"Pipes", "signatureVersion":"v4", @@ -195,19 +195,19 @@ "type":"string", "max":1600, "min":1, - "pattern":"^arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)$" + "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)" }, "ArnOrJsonPath":{ "type":"string", "max":1600, "min":1, - "pattern":"^arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$" + "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)" }, "ArnOrUrl":{ "type":"string", "max":1600, "min":1, - "pattern":"^smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)$" + "pattern":"smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)" }, "AssignPublicIp":{ "type":"string", @@ -220,17 +220,17 @@ "type":"structure", "required":["Subnets"], "members":{ - "AssignPublicIp":{ - "shape":"AssignPublicIp", - 
"documentation":"

Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE.

" + "Subnets":{ + "shape":"Subnets", + "documentation":"

Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

" }, "SecurityGroups":{ "shape":"SecurityGroups", "documentation":"

Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" }, - "Subnets":{ - "shape":"Subnets", - "documentation":"

Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

" + "AssignPublicIp":{ + "shape":"AssignPublicIp", + "documentation":"

Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE.

" } }, "documentation":"

This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

" @@ -386,10 +386,6 @@ "type":"structure", "required":["capacityProvider"], "members":{ - "base":{ - "shape":"CapacityProviderStrategyItemBase", - "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used.

" - }, "capacityProvider":{ "shape":"CapacityProvider", "documentation":"

The short name of the capacity provider.

" @@ -397,6 +393,10 @@ "weight":{ "shape":"CapacityProviderStrategyItemWeight", "documentation":"

The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied.

" + }, + "base":{ + "shape":"CapacityProviderStrategyItemBase", + "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used.

" } }, "documentation":"

The details of a capacity provider strategy. To learn more, see CapacityProviderStrategyItem in the Amazon ECS API Reference.

" @@ -415,7 +415,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"^(^arn:aws([a-z]|\\-)*:logs:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):log-group:[\\.\\-_/#A-Za-z0-9]{1,512}(:\\*)?)$" + "pattern":"(^arn:aws([a-z]|\\-)*:logs:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):log-group:[\\.\\-_/#A-Za-z0-9]{1,512}(:\\*)?)" }, "CloudwatchLogsLogDestination":{ "type":"structure", @@ -467,11 +467,17 @@ "type":"structure", "required":[ "Name", - "RoleArn", "Source", - "Target" + "Target", + "RoleArn" ], "members":{ + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

", + "location":"uri", + "locationName":"Name" + }, "Description":{ "shape":"PipeDescription", "documentation":"

A description of the pipe.

" @@ -480,6 +486,14 @@ "shape":"RequestedPipeState", "documentation":"

The state the pipe should be in.

" }, + "Source":{ + "shape":"ArnOrUrl", + "documentation":"

The ARN of the source resource.

" + }, + "SourceParameters":{ + "shape":"PipeSourceParameters", + "documentation":"

The parameters required to set up a source for your pipe.

" + }, "Enrichment":{ "shape":"OptionalArn", "documentation":"

The ARN of the enrichment resource.

" @@ -488,39 +502,25 @@ "shape":"PipeEnrichmentParameters", "documentation":"

The parameters required to set up enrichment on your pipe.

" }, - "LogConfiguration":{ - "shape":"PipeLogConfigurationParameters", - "documentation":"

The logging configuration settings for the pipe.

" + "Target":{ + "shape":"Arn", + "documentation":"

The ARN of the target resource.

" }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

", - "location":"uri", - "locationName":"Name" + "TargetParameters":{ + "shape":"PipeTargetParameters", + "documentation":"

The parameters required to set up a target for your pipe.

For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.

" }, "RoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the role that allows the pipe to send data to the target.

" }, - "Source":{ - "shape":"ArnOrUrl", - "documentation":"

The ARN of the source resource.

" - }, - "SourceParameters":{ - "shape":"PipeSourceParameters", - "documentation":"

The parameters required to set up a source for your pipe.

" - }, "Tags":{ "shape":"TagMap", "documentation":"

The list of key-value pairs to associate with the pipe.

" }, - "Target":{ - "shape":"Arn", - "documentation":"

The ARN of the target resource.

" - }, - "TargetParameters":{ - "shape":"PipeTargetParameters", - "documentation":"

The parameters required to set up a target for your pipe.

For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.

" + "LogConfiguration":{ + "shape":"PipeLogConfigurationParameters", + "documentation":"

The logging configuration settings for the pipe.

" } } }, @@ -531,25 +531,25 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, + "DesiredState":{ + "shape":"RequestedPipeState", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeState", - "documentation":"

The state the pipe should be in.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" } } }, @@ -596,25 +596,25 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, + "DesiredState":{ + "shape":"RequestedPipeStateDescribeResponse", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeStateDescribeResponse", - "documentation":"

The state the pipe should be in.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" } } }, @@ -637,13 +637,9 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" - }, - "CurrentState":{ - "shape":"PipeState", - "documentation":"

The state the pipe is in.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" }, "Description":{ "shape":"PipeDescription", @@ -653,29 +649,13 @@ "shape":"RequestedPipeStateDescribeResponse", "documentation":"

The state the pipe should be in.

" }, - "Enrichment":{ - "shape":"OptionalArn", - "documentation":"

The ARN of the enrichment resource.

" - }, - "EnrichmentParameters":{ - "shape":"PipeEnrichmentParameters", - "documentation":"

The parameters required to set up enrichment on your pipe.

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LogConfiguration":{ - "shape":"PipeLogConfiguration", - "documentation":"

The logging configuration settings for the pipe.

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" + "CurrentState":{ + "shape":"PipeState", + "documentation":"

The state the pipe is in.

" }, - "RoleArn":{ - "shape":"RoleArn", - "documentation":"

The ARN of the role that allows the pipe to send data to the target.

" + "StateReason":{ + "shape":"PipeStateReason", + "documentation":"

The reason the pipe is in its current state.

" }, "Source":{ "shape":"ArnOrUrl", @@ -685,13 +665,13 @@ "shape":"PipeSourceParameters", "documentation":"

The parameters required to set up a source for your pipe.

" }, - "StateReason":{ - "shape":"PipeStateReason", - "documentation":"

The reason the pipe is in its current state.

" + "Enrichment":{ + "shape":"OptionalArn", + "documentation":"

The ARN of the enrichment resource.

" }, - "Tags":{ - "shape":"TagMap", - "documentation":"

The list of key-value pairs to associate with the pipe.

" + "EnrichmentParameters":{ + "shape":"PipeEnrichmentParameters", + "documentation":"

The parameters required to set up enrichment on your pipe.

" }, "Target":{ "shape":"Arn", @@ -700,9 +680,72 @@ "TargetParameters":{ "shape":"PipeTargetParameters", "documentation":"

The parameters required to set up a target for your pipe.

For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that allows the pipe to send data to the target.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The list of key-value pairs to associate with the pipe.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "LogConfiguration":{ + "shape":"PipeLogConfiguration", + "documentation":"

The logging configuration settings for the pipe.

" } } }, + "DimensionMapping":{ + "type":"structure", + "required":[ + "DimensionValue", + "DimensionValueType", + "DimensionName" + ], + "members":{ + "DimensionValue":{ + "shape":"DimensionValue", + "documentation":"

Dynamic path to the dimension value in the source event.

" + }, + "DimensionValueType":{ + "shape":"DimensionValueType", + "documentation":"

The data type of the dimension for the time-series data.

" + }, + "DimensionName":{ + "shape":"DimensionName", + "documentation":"

The metadata attributes of the time series. For example, the name and Availability Zone of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

" + } + }, + "documentation":"

Maps source data to a dimension in the target Timestream for LiveAnalytics table.

For more information, see Amazon Timestream for LiveAnalytics concepts

" + }, + "DimensionMappings":{ + "type":"list", + "member":{"shape":"DimensionMapping"}, + "max":128, + "min":1 + }, + "DimensionName":{ + "type":"string", + "max":256, + "min":1 + }, + "DimensionValue":{ + "type":"string", + "max":2048, + "min":1 + }, + "DimensionValueType":{ + "type":"string", + "enum":["VARCHAR"] + }, "DynamoDBStreamStartPosition":{ "type":"string", "enum":[ @@ -892,7 +935,7 @@ "type":"string", "max":300, "min":1, - "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}$", + "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}", "sensitive":true }, "EphemeralStorageSize":{ @@ -901,6 +944,15 @@ "max":200, "min":21 }, + "EpochTimeUnit":{ + "type":"string", + "enum":[ + "MILLISECONDS", + "SECONDS", + "MICROSECONDS", + "NANOSECONDS" + ] + }, "ErrorMessage":{"type":"string"}, "EventBridgeDetailType":{ "type":"string", @@ -912,7 +964,7 @@ "type":"string", "max":50, "min":1, - "pattern":"^[A-Za-z0-9\\-]+[\\.][A-Za-z0-9\\-]+$", + "pattern":"[A-Za-z0-9\\-]+[\\.][A-Za-z0-9\\-]+", "sensitive":true }, "EventBridgeEventResourceList":{ @@ -925,7 +977,7 @@ "type":"string", "max":256, "min":1, - "pattern":"(?=[/\\.\\-_A-Za-z0-9]+)((?!aws\\.).*)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", + "pattern":".*(?=[/\\.\\-_A-Za-z0-9]+)((?!aws\\.).*)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*).*", "sensitive":true }, "EventPattern":{ @@ -964,17 +1016,17 @@ "type":"string", "max":1600, "min":1, - "pattern":"^(^arn:aws([a-z]|\\-)*:firehose:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):deliverystream/[a-zA-Z0-9_.-]{1,64})$" + "pattern":"(^arn:aws([a-z]|\\-)*:firehose:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):deliverystream/[a-zA-Z0-9_.-]{1,64})" }, "FirehoseLogDestination":{ "type":"structure", "members":{ "DeliveryStreamArn":{ "shape":"FirehoseArn", - "documentation":"

The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.

" + "documentation":"

The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.

" } }, - "documentation":"

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "documentation":"

The Amazon Data Firehose logging configuration settings for the pipe.

" }, "FirehoseLogDestinationParameters":{ "type":"structure", @@ -982,16 +1034,16 @@ "members":{ "DeliveryStreamArn":{ "shape":"FirehoseArn", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.

" } }, - "documentation":"

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "documentation":"

The Amazon Data Firehose logging configuration settings for the pipe.

" }, "HeaderKey":{ "type":"string", "max":512, "min":0, - "pattern":"^[!#$%&'*+-.^_`|~0-9a-zA-Z]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$" + "pattern":"[!#$%&'*+-.^_`|~0-9a-zA-Z]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)" }, "HeaderParametersMap":{ "type":"map", @@ -1002,7 +1054,7 @@ "type":"string", "max":512, "min":0, - "pattern":"^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "IncludeExecutionData":{ @@ -1044,7 +1096,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*$" + "pattern":"\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*" }, "KafkaBootstrapServers":{ "type":"list", @@ -1056,7 +1108,7 @@ "type":"string", "max":249, "min":1, - "pattern":"^[^.]([a-zA-Z0-9\\-_.]+)$", + "pattern":"[^.]([a-zA-Z0-9\\-_.]+)", "sensitive":true }, "KinesisPartitionKey":{ @@ -1107,11 +1159,11 @@ "ListPipesRequest":{ "type":"structure", "members":{ - "CurrentState":{ - "shape":"PipeState", - "documentation":"

The state the pipe is in.

", + "NamePrefix":{ + "shape":"PipeName", + "documentation":"

A value that will return a subset of the pipes associated with this account. For example, \"NamePrefix\": \"ABC\" will return all endpoints with \"ABC\" in the name.

", "location":"querystring", - "locationName":"CurrentState" + "locationName":"NamePrefix" }, "DesiredState":{ "shape":"RequestedPipeState", @@ -1119,23 +1171,11 @@ "location":"querystring", "locationName":"DesiredState" }, - "Limit":{ - "shape":"LimitMax100", - "documentation":"

The maximum number of pipes to include in the response.

", - "location":"querystring", - "locationName":"Limit" - }, - "NamePrefix":{ - "shape":"PipeName", - "documentation":"

A value that will return a subset of the pipes associated with this account. For example, \"NamePrefix\": \"ABC\" will return all endpoints with \"ABC\" in the name.

", - "location":"querystring", - "locationName":"NamePrefix" - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

", + "CurrentState":{ + "shape":"PipeState", + "documentation":"

The state the pipe is in.

", "location":"querystring", - "locationName":"NextToken" + "locationName":"CurrentState" }, "SourcePrefix":{ "shape":"ResourceArn", @@ -1148,19 +1188,31 @@ "documentation":"

The prefix matching the pipe target.

", "location":"querystring", "locationName":"TargetPrefix" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

", + "location":"querystring", + "locationName":"NextToken" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of pipes to include in the response.

", + "location":"querystring", + "locationName":"Limit" } } }, "ListPipesResponse":{ "type":"structure", "members":{ - "NextToken":{ - "shape":"NextToken", - "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" - }, "Pipes":{ "shape":"PipeList", "documentation":"

The pipes returned by the call.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" } } }, @@ -1214,17 +1266,17 @@ "type":"string", "max":1000, "min":1, - "pattern":"^[\\s\\S]*$", + "pattern":"[\\s\\S]*", "sensitive":true }, "MSKAccessCredentials":{ "type":"structure", "members":{ - "ClientCertificateTlsAuth":{ + "SaslScram512Auth":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" }, - "SaslScram512Auth":{ + "ClientCertificateTlsAuth":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" } @@ -1257,6 +1309,26 @@ "max":10000, "min":-1 }, + "MeasureName":{ + "type":"string", + "max":1024, + "min":1 + }, + "MeasureValue":{ + "type":"string", + "max":2048, + "min":1 + }, + "MeasureValueType":{ + "type":"string", + "enum":[ + "DOUBLE", + "BIGINT", + "VARCHAR", + "BOOLEAN", + "TIMESTAMP" + ] + }, "MessageDeduplicationId":{ "type":"string", "max":100, @@ -1269,6 +1341,69 @@ "min":0, "sensitive":true }, + "MultiMeasureAttributeMapping":{ + "type":"structure", + "required":[ + "MeasureValue", + "MeasureValueType", + "MultiMeasureAttributeName" + ], + "members":{ + "MeasureValue":{ + "shape":"MeasureValue", + "documentation":"

Dynamic path to the measurement attribute in the source event.

" + }, + "MeasureValueType":{ + "shape":"MeasureValueType", + "documentation":"

Data type of the measurement attribute in the source event.

" + }, + "MultiMeasureAttributeName":{ + "shape":"MultiMeasureAttributeName", + "documentation":"

Target measure name to be used.

" + } + }, + "documentation":"

A mapping of a source event data field to a measure in a Timestream for LiveAnalytics record.

" + }, + "MultiMeasureAttributeMappings":{ + "type":"list", + "member":{"shape":"MultiMeasureAttributeMapping"}, + "max":256, + "min":1 + }, + "MultiMeasureAttributeName":{ + "type":"string", + "max":256, + "min":1 + }, + "MultiMeasureMapping":{ + "type":"structure", + "required":[ + "MultiMeasureName", + "MultiMeasureAttributeMappings" + ], + "members":{ + "MultiMeasureName":{ + "shape":"MultiMeasureName", + "documentation":"

The name of the multiple measurements per record (multi-measure).

" + }, + "MultiMeasureAttributeMappings":{ + "shape":"MultiMeasureAttributeMappings", + "documentation":"

Mappings that represent multiple source event fields mapped to measures in the same Timestream for LiveAnalytics record.

" + } + }, + "documentation":"

Maps multiple measures from the source event to the same Timestream for LiveAnalytics record.

For more information, see Amazon Timestream for LiveAnalytics concepts

" + }, + "MultiMeasureMappings":{ + "type":"list", + "member":{"shape":"MultiMeasureMapping"}, + "max":1024, + "min":0 + }, + "MultiMeasureName":{ + "type":"string", + "max":256, + "min":1 + }, "NetworkConfiguration":{ "type":"structure", "members":{ @@ -1305,11 +1440,11 @@ "type":"string", "max":1600, "min":0, - "pattern":"^$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)$" + "pattern":"$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.+)" }, "PathParameter":{ "type":"string", - "pattern":"^(?!\\s*$).+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"(?!\\s*$).+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "PathParameterList":{ @@ -1319,45 +1454,45 @@ "Pipe":{ "type":"structure", "members":{ + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, "Arn":{ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "DesiredState":{ + "shape":"RequestedPipeState", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeState", - "documentation":"

The state the pipe should be in.

" + "StateReason":{ + "shape":"PipeStateReason", + "documentation":"

The reason the pipe is in its current state.

" }, - "Enrichment":{ - "shape":"OptionalArn", - "documentation":"

The ARN of the enrichment resource.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" - }, "Source":{ "shape":"ArnOrUrl", "documentation":"

The ARN of the source resource.

" }, - "StateReason":{ - "shape":"PipeStateReason", - "documentation":"

The reason the pipe is in its current state.

" - }, "Target":{ "shape":"Arn", "documentation":"

The ARN of the target resource.

" + }, + "Enrichment":{ + "shape":"OptionalArn", + "documentation":"

The ARN of the enrichment resource.

" } }, "documentation":"

An object that represents a pipe. Amazon EventBridgePipes connect event sources to targets and reduces the need for specialized knowledge and integration code.

" @@ -1366,26 +1501,26 @@ "type":"string", "max":1600, "min":1, - "pattern":"^arn:aws([a-z]|\\-)*:([a-zA-Z0-9\\-]+):([a-z]|\\d|\\-)*:([0-9]{12})?:(.+)$" + "pattern":"arn:aws([a-z]|\\-)*:([a-zA-Z0-9\\-]+):([a-z]|\\d|\\-)*:([0-9]{12})?:(.+)" }, "PipeDescription":{ "type":"string", "max":512, "min":0, - "pattern":"^.*$", + "pattern":".*", "sensitive":true }, "PipeEnrichmentHttpParameters":{ "type":"structure", "members":{ - "HeaderParameters":{ - "shape":"HeaderParametersMap", - "documentation":"

The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" - }, "PathParameterValues":{ "shape":"PathParameterList", "documentation":"

The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").

" }, + "HeaderParameters":{ + "shape":"HeaderParametersMap", + "documentation":"

The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" + }, "QueryStringParameters":{ "shape":"QueryStringParametersMap", "documentation":"

The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" @@ -1396,13 +1531,13 @@ "PipeEnrichmentParameters":{ "type":"structure", "members":{ - "HttpParameters":{ - "shape":"PipeEnrichmentHttpParameters", - "documentation":"

Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or EventBridge ApiDestination.

If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of your target invoking request. If you're using ApiDestinations, the corresponding Connection can also have these values configured. In case of any conflicting keys, values from the Connection take precedence.

" - }, "InputTemplate":{ "shape":"InputTemplate", "documentation":"

Valid JSON text passed to the enrichment. In this case, nothing from the event itself is passed to the enrichment. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format.

To remove an input template, specify an empty string.

" + }, + "HttpParameters":{ + "shape":"PipeEnrichmentHttpParameters", + "documentation":"

Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or EventBridge ApiDestination.

If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of your target invoking request. If you're using ApiDestinations, the corresponding Connection can also have these values configured. In case of any conflicting keys, values from the Connection take precedence.

" } }, "documentation":"

The parameters required to set up enrichment on your pipe.

" @@ -1414,25 +1549,25 @@ "PipeLogConfiguration":{ "type":"structure", "members":{ - "CloudwatchLogsLogDestination":{ - "shape":"CloudwatchLogsLogDestination", - "documentation":"

The Amazon CloudWatch Logs logging configuration settings for the pipe.

" + "S3LogDestination":{ + "shape":"S3LogDestination", + "documentation":"

The Amazon S3 logging configuration settings for the pipe.

" }, "FirehoseLogDestination":{ "shape":"FirehoseLogDestination", - "documentation":"

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "documentation":"

The Amazon Data Firehose logging configuration settings for the pipe.

" }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "documentation":"

Whether the execution data (specifically, the payload, awsRequest, and awsResponse fields) is included in the log messages for this pipe.

This applies to all log destinations for the pipe.

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

" + "CloudwatchLogsLogDestination":{ + "shape":"CloudwatchLogsLogDestination", + "documentation":"

The Amazon CloudWatch Logs logging configuration settings for the pipe.

" }, "Level":{ "shape":"LogLevel", "documentation":"

The level of logging detail to include. This applies to all log destinations for the pipe.

" }, - "S3LogDestination":{ - "shape":"S3LogDestination", - "documentation":"

The Amazon S3 logging configuration settings for the pipe.

" + "IncludeExecutionData":{ + "shape":"IncludeExecutionData", + "documentation":"

Whether the execution data (specifically, the payload, awsRequest, and awsResponse fields) is included in the log messages for this pipe.

This applies to all log destinations for the pipe.

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

" } }, "documentation":"

The logging configuration settings for the pipe.

" @@ -1441,34 +1576,34 @@ "type":"structure", "required":["Level"], "members":{ - "CloudwatchLogsLogDestination":{ - "shape":"CloudwatchLogsLogDestinationParameters", - "documentation":"

The Amazon CloudWatch Logs logging configuration settings for the pipe.

" + "S3LogDestination":{ + "shape":"S3LogDestinationParameters", + "documentation":"

The Amazon S3 logging configuration settings for the pipe.

" }, "FirehoseLogDestination":{ "shape":"FirehoseLogDestinationParameters", - "documentation":"

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "documentation":"

The Amazon Data Firehose logging configuration settings for the pipe.

" }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "documentation":"

Specify ON to include the execution data (specifically, the payload and awsRequest fields) in the log messages for this pipe.

This applies to all log destinations for the pipe.

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

The default is OFF.

" + "CloudwatchLogsLogDestination":{ + "shape":"CloudwatchLogsLogDestinationParameters", + "documentation":"

The Amazon CloudWatch Logs logging configuration settings for the pipe.

" }, "Level":{ "shape":"LogLevel", "documentation":"

The level of logging detail to include. This applies to all log destinations for the pipe.

For more information, see Specifying EventBridge Pipes log level in the Amazon EventBridge User Guide.

" }, - "S3LogDestination":{ - "shape":"S3LogDestinationParameters", - "documentation":"

The Amazon S3 logging configuration settings for the pipe.

" + "IncludeExecutionData":{ + "shape":"IncludeExecutionData", + "documentation":"

Specify ALL to include the execution data (specifically, the payload, awsRequest, and awsResponse fields) in the log messages for this pipe.

This applies to all log destinations for the pipe.

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

By default, execution data is not included.

" } }, - "documentation":"

Specifies the logging configuration settings for the pipe.

When you call UpdatePipe, EventBridge updates the fields in the PipeLogConfigurationParameters object atomically as one and overrides existing values. This is by design. If you don't specify an optional field in any of the Amazon Web Services service parameters objects (CloudwatchLogsLogDestinationParameters, FirehoseLogDestinationParameters, or S3LogDestinationParameters), EventBridge sets that field to its system-default value during the update.

For example, suppose when you created the pipe you specified a Kinesis Data Firehose stream log destination. You then update the pipe to add an Amazon S3 log destination. In addition to specifying the S3LogDestinationParameters for the new log destination, you must also specify the fields in the FirehoseLogDestinationParameters object in order to retain the Kinesis Data Firehose stream log destination.

For more information on generating pipe log records, see Log EventBridge Pipes in the Amazon EventBridge User Guide.

" + "documentation":"

Specifies the logging configuration settings for the pipe.

When you call UpdatePipe, EventBridge updates the fields in the PipeLogConfigurationParameters object atomically as one and overrides existing values. This is by design. If you don't specify an optional field in any of the Amazon Web Services service parameters objects (CloudwatchLogsLogDestinationParameters, FirehoseLogDestinationParameters, or S3LogDestinationParameters), EventBridge sets that field to its system-default value during the update.

For example, suppose when you created the pipe you specified a Firehose stream log destination. You then update the pipe to add an Amazon S3 log destination. In addition to specifying the S3LogDestinationParameters for the new log destination, you must also specify the fields in the FirehoseLogDestinationParameters object in order to retain the Firehose stream log destination.

For more information on generating pipe log records, see Log EventBridge Pipes in the Amazon EventBridge User Guide.

" }, "PipeName":{ "type":"string", "max":64, "min":1, - "pattern":"^[\\.\\-_A-Za-z0-9]+$" + "pattern":"[\\.\\-_A-Za-z0-9]+" }, "PipeSourceActiveMQBrokerParameters":{ "type":"structure", @@ -1477,21 +1612,21 @@ "QueueName" ], "members":{ - "BatchSize":{ - "shape":"LimitMax10000", - "documentation":"

The maximum number of records to include in each batch.

" - }, "Credentials":{ "shape":"MQBrokerAccessCredentials", "documentation":"

The credentials needed to access the resource.

" }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum length of a time to wait for events.

" - }, "QueueName":{ "shape":"MQBrokerQueueName", "documentation":"

The name of the destination queue to consume.

" + }, + "BatchSize":{ + "shape":"LimitMax10000", + "documentation":"

The maximum number of records to include in each batch.

" + }, + "MaximumBatchingWindowInSeconds":{ + "shape":"MaximumBatchingWindowInSeconds", + "documentation":"

The maximum length of a time to wait for events.

" } }, "documentation":"

The parameters for using an Active MQ broker as a source.

" @@ -1508,6 +1643,10 @@ "shape":"DeadLetterConfig", "documentation":"

Define the target queue to send dead-letter queue events to.

" }, + "OnPartialBatchItemFailure":{ + "shape":"OnPartialBatchItemFailureStreams", + "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -1520,10 +1659,6 @@ "shape":"MaximumRetryAttemptsESM", "documentation":"

(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.

" }, - "OnPartialBatchItemFailure":{ - "shape":"OnPartialBatchItemFailureStreams", - "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" - }, "ParallelizationFactor":{ "shape":"LimitMax10", "documentation":"

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" @@ -1547,6 +1682,10 @@ "shape":"DeadLetterConfig", "documentation":"

Define the target queue to send dead-letter queue events to.

" }, + "OnPartialBatchItemFailure":{ + "shape":"OnPartialBatchItemFailureStreams", + "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -1559,10 +1698,6 @@ "shape":"MaximumRetryAttemptsESM", "documentation":"

(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.

" }, - "OnPartialBatchItemFailure":{ - "shape":"OnPartialBatchItemFailureStreams", - "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" - }, "ParallelizationFactor":{ "shape":"LimitMax10", "documentation":"

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" @@ -1582,10 +1717,22 @@ "type":"structure", "required":["TopicName"], "members":{ + "TopicName":{ + "shape":"KafkaTopicName", + "documentation":"

The name of the topic that the pipe will read from.

" + }, + "StartingPosition":{ + "shape":"MSKStartPosition", + "documentation":"

(Streams only) The position in a stream from which to start reading.

" + }, "BatchSize":{ "shape":"LimitMax10000", "documentation":"

The maximum number of records to include in each batch.

" }, + "MaximumBatchingWindowInSeconds":{ + "shape":"MaximumBatchingWindowInSeconds", + "documentation":"

The maximum length of a time to wait for events.

" + }, "ConsumerGroupID":{ "shape":"URI", "documentation":"

The name of the destination queue to consume.

" @@ -1593,18 +1740,6 @@ "Credentials":{ "shape":"MSKAccessCredentials", "documentation":"

The credentials needed to access the resource.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum length of a time to wait for events.

" - }, - "StartingPosition":{ - "shape":"MSKStartPosition", - "documentation":"

(Streams only) The position in a stream from which to start reading.

" - }, - "TopicName":{ - "shape":"KafkaTopicName", - "documentation":"

The name of the topic that the pipe will read from.

" } }, "documentation":"

The parameters for using an MSK stream as a source.

" @@ -1612,14 +1747,6 @@ "PipeSourceParameters":{ "type":"structure", "members":{ - "ActiveMQBrokerParameters":{ - "shape":"PipeSourceActiveMQBrokerParameters", - "documentation":"

The parameters for using an Active MQ broker as a source.

" - }, - "DynamoDBStreamParameters":{ - "shape":"PipeSourceDynamoDBStreamParameters", - "documentation":"

The parameters for using a DynamoDB stream as a source.

" - }, "FilterCriteria":{ "shape":"FilterCriteria", "documentation":"

The collection of event patterns used to filter events.

To remove a filter, specify a FilterCriteria object with an empty array of Filter objects.

For more information, see Events and Event Patterns in the Amazon EventBridge User Guide.

" @@ -1628,21 +1755,29 @@ "shape":"PipeSourceKinesisStreamParameters", "documentation":"

The parameters for using a Kinesis stream as a source.

" }, - "ManagedStreamingKafkaParameters":{ - "shape":"PipeSourceManagedStreamingKafkaParameters", - "documentation":"

The parameters for using an MSK stream as a source.

" + "DynamoDBStreamParameters":{ + "shape":"PipeSourceDynamoDBStreamParameters", + "documentation":"

The parameters for using a DynamoDB stream as a source.

" + }, + "SqsQueueParameters":{ + "shape":"PipeSourceSqsQueueParameters", + "documentation":"

The parameters for using a Amazon SQS stream as a source.

" + }, + "ActiveMQBrokerParameters":{ + "shape":"PipeSourceActiveMQBrokerParameters", + "documentation":"

The parameters for using an Active MQ broker as a source.

" }, "RabbitMQBrokerParameters":{ "shape":"PipeSourceRabbitMQBrokerParameters", "documentation":"

The parameters for using a Rabbit MQ broker as a source.

" }, + "ManagedStreamingKafkaParameters":{ + "shape":"PipeSourceManagedStreamingKafkaParameters", + "documentation":"

The parameters for using an MSK stream as a source.

" + }, "SelfManagedKafkaParameters":{ "shape":"PipeSourceSelfManagedKafkaParameters", - "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

" - }, - "SqsQueueParameters":{ - "shape":"PipeSourceSqsQueueParameters", - "documentation":"

The parameters for using a Amazon SQS stream as a source.

" + "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } }, "documentation":"

The parameters required to set up a source for your pipe.

" @@ -1654,18 +1789,10 @@ "QueueName" ], "members":{ - "BatchSize":{ - "shape":"LimitMax10000", - "documentation":"

The maximum number of records to include in each batch.

" - }, "Credentials":{ "shape":"MQBrokerAccessCredentials", "documentation":"

The credentials needed to access the resource.

" }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum length of a time to wait for events.

" - }, "QueueName":{ "shape":"MQBrokerQueueName", "documentation":"

The name of the destination queue to consume.

" @@ -1673,6 +1800,14 @@ "VirtualHost":{ "shape":"URI", "documentation":"

The name of the virtual host associated with the source broker.

" + }, + "BatchSize":{ + "shape":"LimitMax10000", + "documentation":"

The maximum number of records to include in each batch.

" + }, + "MaximumBatchingWindowInSeconds":{ + "shape":"MaximumBatchingWindowInSeconds", + "documentation":"

The maximum length of a time to wait for events.

" } }, "documentation":"

The parameters for using a Rabbit MQ broker as a source.

" @@ -1681,6 +1816,14 @@ "type":"structure", "required":["TopicName"], "members":{ + "TopicName":{ + "shape":"KafkaTopicName", + "documentation":"

The name of the topic that the pipe will read from.

" + }, + "StartingPosition":{ + "shape":"SelfManagedKafkaStartPosition", + "documentation":"

(Streams only) The position in a stream from which to start reading.

" + }, "AdditionalBootstrapServers":{ "shape":"KafkaBootstrapServers", "documentation":"

An array of server URLs.

" @@ -1689,6 +1832,10 @@ "shape":"LimitMax10000", "documentation":"

The maximum number of records to include in each batch.

" }, + "MaximumBatchingWindowInSeconds":{ + "shape":"MaximumBatchingWindowInSeconds", + "documentation":"

The maximum length of a time to wait for events.

" + }, "ConsumerGroupID":{ "shape":"URI", "documentation":"

The name of the destination queue to consume.

" @@ -1697,28 +1844,16 @@ "shape":"SelfManagedKafkaAccessConfigurationCredentials", "documentation":"

The credentials needed to access the resource.

" }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum length of a time to wait for events.

" - }, "ServerRootCaCertificate":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret used for certification.

" }, - "StartingPosition":{ - "shape":"SelfManagedKafkaStartPosition", - "documentation":"

(Streams only) The position in a stream from which to start reading.

" - }, - "TopicName":{ - "shape":"KafkaTopicName", - "documentation":"

The name of the topic that the pipe will read from.

" - }, "Vpc":{ "shape":"SelfManagedKafkaAccessConfigurationVpc", "documentation":"

This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.

" } }, - "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

" + "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" }, "PipeSourceSqsQueueParameters":{ "type":"structure", @@ -1758,7 +1893,7 @@ "type":"string", "max":512, "min":0, - "pattern":"^.*$" + "pattern":".*" }, "PipeTargetBatchJobParameters":{ "type":"structure", @@ -1767,10 +1902,22 @@ "JobName" ], "members":{ + "JobDefinition":{ + "shape":"String", + "documentation":"

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision then the latest active revision is used.

" + }, + "JobName":{ + "shape":"String", + "documentation":"

The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).

" + }, "ArrayProperties":{ "shape":"BatchArrayProperties", "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an Batch job.

" }, + "RetryStrategy":{ + "shape":"BatchRetryStrategy", + "documentation":"

The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.

" + }, "ContainerOverrides":{ "shape":"BatchContainerOverrides", "documentation":"

The overrides that are sent to a container.

" @@ -1779,21 +1926,9 @@ "shape":"BatchDependsOn", "documentation":"

A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin.

" }, - "JobDefinition":{ - "shape":"String", - "documentation":"

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision then the latest active revision is used.

" - }, - "JobName":{ - "shape":"String", - "documentation":"

The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).

" - }, "Parameters":{ "shape":"BatchParametersMap", "documentation":"

Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition.

" - }, - "RetryStrategy":{ - "shape":"BatchRetryStrategy", - "documentation":"

The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.

" } }, "documentation":"

The parameters for using an Batch job as a target.

" @@ -1816,6 +1951,30 @@ "type":"structure", "required":["TaskDefinitionArn"], "members":{ + "TaskDefinitionArn":{ + "shape":"ArnOrJsonPath", + "documentation":"

The ARN of the task definition to use if the event target is an Amazon ECS task.

" + }, + "TaskCount":{ + "shape":"LimitMin1", + "documentation":"

The number of tasks to create based on TaskDefinition. The default is 1.

" + }, + "LaunchType":{ + "shape":"LaunchType", + "documentation":"

Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + }, + "NetworkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

" + }, + "PlatformVersion":{ + "shape":"String", + "documentation":"

Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0.

This structure is used only if LaunchType is FARGATE. For more information about valid platform versions, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + }, + "Group":{ + "shape":"String", + "documentation":"

Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.

" + }, "CapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", "documentation":"

The capacity provider strategy to use for the task.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

" @@ -1828,22 +1987,6 @@ "shape":"Boolean", "documentation":"

Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task.

" }, - "Group":{ - "shape":"String", - "documentation":"

Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.

" - }, - "LaunchType":{ - "shape":"LaunchType", - "documentation":"

Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" - }, - "NetworkConfiguration":{ - "shape":"NetworkConfiguration", - "documentation":"

Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

" - }, - "Overrides":{ - "shape":"EcsTaskOverride", - "documentation":"

The overrides that are associated with a task.

" - }, "PlacementConstraints":{ "shape":"PlacementConstraints", "documentation":"

An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime).

" @@ -1852,10 +1995,6 @@ "shape":"PlacementStrategies", "documentation":"

The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task.

" }, - "PlatformVersion":{ - "shape":"String", - "documentation":"

Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0.

This structure is used only if LaunchType is FARGATE. For more information about valid platform versions, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" - }, "PropagateTags":{ "shape":"PropagateTags", "documentation":"

Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

" @@ -1864,17 +2003,13 @@ "shape":"ReferenceId", "documentation":"

The reference ID to use for the task.

" }, + "Overrides":{ + "shape":"EcsTaskOverride", + "documentation":"

The overrides that are associated with a task.

" + }, "Tags":{ "shape":"TagList", "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. To learn more, see RunTask in the Amazon ECS API Reference.

" - }, - "TaskCount":{ - "shape":"LimitMin1", - "documentation":"

The number of tasks to create based on TaskDefinition. The default is 1.

" - }, - "TaskDefinitionArn":{ - "shape":"ArnOrJsonPath", - "documentation":"

The ARN of the task definition to use if the event target is an Amazon ECS task.

" } }, "documentation":"

The parameters for using an Amazon ECS task as a target.

" @@ -1882,22 +2017,22 @@ "PipeTargetEventBridgeEventBusParameters":{ "type":"structure", "members":{ - "DetailType":{ - "shape":"EventBridgeDetailType", - "documentation":"

A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

" - }, "EndpointId":{ "shape":"EventBridgeEndpointId", "documentation":"

The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.

" }, - "Resources":{ - "shape":"EventBridgeEventResourceList", - "documentation":"

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" + "DetailType":{ + "shape":"EventBridgeDetailType", + "documentation":"

A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

" }, "Source":{ "shape":"EventBridgeEventSource", "documentation":"

The source of the event.

" }, + "Resources":{ + "shape":"EventBridgeEventResourceList", + "documentation":"

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" + }, "Time":{ "shape":"JsonPath", "documentation":"

The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used.

" @@ -1908,14 +2043,14 @@ "PipeTargetHttpParameters":{ "type":"structure", "members":{ - "HeaderParameters":{ - "shape":"HeaderParametersMap", - "documentation":"

The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" - }, "PathParameterValues":{ "shape":"PathParameterList", "documentation":"

The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").

" }, + "HeaderParameters":{ + "shape":"HeaderParametersMap", + "documentation":"

The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" + }, "QueryStringParameters":{ "shape":"QueryStringParametersMap", "documentation":"

The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination.

" @@ -1954,38 +2089,38 @@ "PipeTargetParameters":{ "type":"structure", "members":{ - "BatchJobParameters":{ - "shape":"PipeTargetBatchJobParameters", - "documentation":"

The parameters for using an Batch job as a target.

" + "InputTemplate":{ + "shape":"InputTemplate", + "documentation":"

Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format.

To remove an input template, specify an empty string.

" }, - "CloudWatchLogsParameters":{ - "shape":"PipeTargetCloudWatchLogsParameters", - "documentation":"

The parameters for using an CloudWatch Logs log stream as a target.

" + "LambdaFunctionParameters":{ + "shape":"PipeTargetLambdaFunctionParameters", + "documentation":"

The parameters for using a Lambda function as a target.

" + }, + "StepFunctionStateMachineParameters":{ + "shape":"PipeTargetStateMachineParameters", + "documentation":"

The parameters for using a Step Functions state machine as a target.

" + }, + "KinesisStreamParameters":{ + "shape":"PipeTargetKinesisStreamParameters", + "documentation":"

The parameters for using a Kinesis stream as a target.

" }, "EcsTaskParameters":{ "shape":"PipeTargetEcsTaskParameters", "documentation":"

The parameters for using an Amazon ECS task as a target.

" }, - "EventBridgeEventBusParameters":{ - "shape":"PipeTargetEventBridgeEventBusParameters", - "documentation":"

The parameters for using an EventBridge event bus as a target.

" + "BatchJobParameters":{ + "shape":"PipeTargetBatchJobParameters", + "documentation":"

The parameters for using an Batch job as a target.

" + }, + "SqsQueueParameters":{ + "shape":"PipeTargetSqsQueueParameters", + "documentation":"

The parameters for using a Amazon SQS stream as a target.

" }, "HttpParameters":{ "shape":"PipeTargetHttpParameters", "documentation":"

These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations.

" }, - "InputTemplate":{ - "shape":"InputTemplate", - "documentation":"

Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format.

To remove an input template, specify an empty string.

" - }, - "KinesisStreamParameters":{ - "shape":"PipeTargetKinesisStreamParameters", - "documentation":"

The parameters for using a Kinesis stream as a target.

" - }, - "LambdaFunctionParameters":{ - "shape":"PipeTargetLambdaFunctionParameters", - "documentation":"

The parameters for using a Lambda function as a target.

" - }, "RedshiftDataParameters":{ "shape":"PipeTargetRedshiftDataParameters", "documentation":"

These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement.

" @@ -1994,13 +2129,17 @@ "shape":"PipeTargetSageMakerPipelineParameters", "documentation":"

The parameters for using a SageMaker pipeline as a target.

" }, - "SqsQueueParameters":{ - "shape":"PipeTargetSqsQueueParameters", - "documentation":"

The parameters for using a Amazon SQS stream as a target.

" + "EventBridgeEventBusParameters":{ + "shape":"PipeTargetEventBridgeEventBusParameters", + "documentation":"

The parameters for using an EventBridge event bus as a target.

" }, - "StepFunctionStateMachineParameters":{ - "shape":"PipeTargetStateMachineParameters", - "documentation":"

The parameters for using a Step Functions state machine as a target.

" + "CloudWatchLogsParameters":{ + "shape":"PipeTargetCloudWatchLogsParameters", + "documentation":"

The parameters for using an CloudWatch Logs log stream as a target.

" + }, + "TimestreamParameters":{ + "shape":"PipeTargetTimestreamParameters", + "documentation":"

The parameters for using a Timestream for LiveAnalytics table as a target.

" } }, "documentation":"

The parameters required to set up a target for your pipe.

For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.

" @@ -2012,6 +2151,10 @@ "Sqls" ], "members":{ + "SecretManagerArn":{ + "shape":"SecretManagerArnOrJsonPath", + "documentation":"

The name or ARN of the secret that enables access to the database. Required when authenticating using Secrets Manager.

" + }, "Database":{ "shape":"Database", "documentation":"

The name of the database. Required when authenticating using temporary credentials.

" @@ -2020,14 +2163,6 @@ "shape":"DbUser", "documentation":"

The database user name. Required when authenticating using temporary credentials.

" }, - "SecretManagerArn":{ - "shape":"SecretManagerArnOrJsonPath", - "documentation":"

The name or ARN of the secret that enables access to the database. Required when authenticating using Secrets Manager.

" - }, - "Sqls":{ - "shape":"Sqls", - "documentation":"

The SQL statement text to run.

" - }, "StatementName":{ "shape":"StatementName", "documentation":"

The name of the SQL statement. You can name the SQL statement when you create it to identify the query.

" @@ -2035,6 +2170,10 @@ "WithEvent":{ "shape":"Boolean", "documentation":"

Indicates whether to send an event back to EventBridge after the SQL statement runs.

" + }, + "Sqls":{ + "shape":"Sqls", + "documentation":"

The SQL statement text to run.

" } }, "documentation":"

These are custom parameters to be used when the target is an Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement.

" @@ -2052,13 +2191,13 @@ "PipeTargetSqsQueueParameters":{ "type":"structure", "members":{ - "MessageDeduplicationId":{ - "shape":"MessageDeduplicationId", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages.

" - }, "MessageGroupId":{ "shape":"MessageGroupId", "documentation":"

The FIFO message group ID to use as the target.

" + }, + "MessageDeduplicationId":{ + "shape":"MessageDeduplicationId", + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages.

" } }, "documentation":"

The parameters for using an Amazon SQS stream as a target.

" @@ -2073,16 +2212,59 @@ }, "documentation":"

The parameters for using a Step Functions state machine as a target.

" }, - "PlacementConstraint":{ + "PipeTargetTimestreamParameters":{ "type":"structure", + "required":[ + "TimeValue", + "VersionValue", + "DimensionMappings" + ], "members":{ - "expression":{ - "shape":"PlacementConstraintExpression", - "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "TimeValue":{ + "shape":"TimeValue", + "documentation":"

Dynamic path to the source data field that represents the time value for your data.

" + }, + "EpochTimeUnit":{ + "shape":"EpochTimeUnit", + "documentation":"

The granularity of the time units used. Default is MILLISECONDS.

Required if TimeFieldType is specified as EPOCH.

" + }, + "TimeFieldType":{ + "shape":"TimeFieldType", + "documentation":"

The type of time value used.

The default is EPOCH.

" + }, + "TimestampFormat":{ + "shape":"TimestampFormat", + "documentation":"

How to format the timestamps. For example, YYYY-MM-DDThh:mm:ss.sssTZD.

Required if TimeFieldType is specified as TIMESTAMP_FORMAT.

" + }, + "VersionValue":{ + "shape":"VersionValue", + "documentation":"

64 bit version value or source data field that represents the version value for your data.

Write requests with a higher version number will update the existing measure values of the record and version. In cases where the measure value is the same, the version will still be updated.

Default value is 1.

Timestream for LiveAnalytics does not support updating partial measure values in a record.

Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. Default value is 1.

Version must be 1 or greater, or you will receive a ValidationException error.

" }, + "DimensionMappings":{ + "shape":"DimensionMappings", + "documentation":"

Map source data to dimensions in the target Timestream for LiveAnalytics table.

For more information, see Amazon Timestream for LiveAnalytics concepts

" + }, + "SingleMeasureMappings":{ + "shape":"SingleMeasureMappings", + "documentation":"

Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.

" + }, + "MultiMeasureMappings":{ + "shape":"MultiMeasureMappings", + "documentation":"

Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table.

" + } + }, + "documentation":"

The parameters for using a Timestream for LiveAnalytics table as a target.

" + }, + "PlacementConstraint":{ + "type":"structure", + "members":{ "type":{ "shape":"PlacementConstraintType", "documentation":"

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates.

" + }, + "expression":{ + "shape":"PlacementConstraintExpression", + "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, "documentation":"

An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" @@ -2115,13 +2297,13 @@ "PlacementStrategy":{ "type":"structure", "members":{ - "field":{ - "shape":"PlacementStrategyField", - "documentation":"

The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.

" - }, "type":{ "shape":"PlacementStrategyType", "documentation":"

The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task).

" + }, + "field":{ + "shape":"PlacementStrategyField", + "documentation":"

The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.

" } }, "documentation":"

The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

" @@ -2148,7 +2330,7 @@ "type":"string", "max":512, "min":0, - "pattern":"^[^\\x00-\\x1F\\x7F]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$" + "pattern":"[^\\x00-\\x1F\\x7F]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)" }, "QueryStringParametersMap":{ "type":"map", @@ -2159,7 +2341,7 @@ "type":"string", "max":512, "min":0, - "pattern":"^[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "ReferenceId":{ @@ -2192,7 +2374,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z0-9+=,.@\\-_/]+$" + "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z0-9+=,.@\\-_/]+" }, "S3LogDestination":{ "type":"structure", @@ -2201,6 +2383,10 @@ "shape":"String", "documentation":"

The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.

" }, + "Prefix":{ + "shape":"String", + "documentation":"

The prefix text with which to begin Amazon S3 log object names.

For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.

" + }, "BucketOwner":{ "shape":"String", "documentation":"

The Amazon Web Services account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.

" @@ -2208,10 +2394,6 @@ "OutputFormat":{ "shape":"S3OutputFormat", "documentation":"

The format EventBridge uses for the log records.

" - }, - "Prefix":{ - "shape":"String", - "documentation":"

The prefix text with which to begin Amazon S3 log object names.

For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.

" } }, "documentation":"

The Amazon S3 logging configuration settings for the pipe.

" @@ -2249,7 +2431,7 @@ }, "S3LogDestinationParametersBucketOwnerString":{ "type":"string", - "pattern":"^\\d{12}$" + "pattern":"\\d{12}" }, "S3LogDestinationParametersPrefixString":{ "type":"string", @@ -2292,7 +2474,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "SageMakerPipelineParameterValue":{ @@ -2306,27 +2488,27 @@ "documentation":"

// Optional SecretManager ARN which stores the database credentials

", "max":1600, "min":1, - "pattern":"^(^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):secret:.+)$" + "pattern":"(^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):secret:.+)" }, "SecretManagerArnOrJsonPath":{ "type":"string", "documentation":"

// For targets, can either specify an ARN or a jsonpath pointing to the ARN.

", "max":1600, "min":1, - "pattern":"^(^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):secret:.+)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$" + "pattern":"(^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}):(\\d{12}):secret:.+)|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)" }, "SecurityGroup":{ "type":"string", "max":1024, "min":1, - "pattern":"^sg-[0-9a-zA-Z]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"sg-[0-9a-zA-Z]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "SecurityGroupId":{ "type":"string", "max":1024, "min":1, - "pattern":"^sg-[0-9a-zA-Z]*$", + "pattern":"sg-[0-9a-zA-Z]*", "sensitive":true }, "SecurityGroupIds":{ @@ -2349,7 +2531,7 @@ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" }, - "ClientCertificateTlsAuth":{ + "SaslScram512Auth":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" }, @@ -2357,7 +2539,7 @@ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" }, - "SaslScram512Auth":{ + "ClientCertificateTlsAuth":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret.

" } @@ -2368,13 +2550,13 @@ "SelfManagedKafkaAccessConfigurationVpc":{ "type":"structure", "members":{ - "SecurityGroup":{ - "shape":"SecurityGroupIds", - "documentation":"

Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" - }, "Subnets":{ "shape":"SubnetIds", "documentation":"

Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

" + }, + "SecurityGroup":{ + "shape":"SecurityGroupIds", + "documentation":"

Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" } }, "documentation":"

This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.

" @@ -2390,17 +2572,13 @@ "type":"structure", "required":[ "message", - "quotaCode", "resourceId", "resourceType", - "serviceCode" + "serviceCode", + "quotaCode" ], "members":{ "message":{"shape":"String"}, - "quotaCode":{ - "shape":"String", - "documentation":"

The identifier of the quota that caused the exception.

" - }, "resourceId":{ "shape":"String", "documentation":"

The ID of the resource that caused the exception.

" @@ -2412,6 +2590,10 @@ "serviceCode":{ "shape":"String", "documentation":"

The identifier of the service that caused the exception.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

The identifier of the quota that caused the exception.

" } }, "documentation":"

A quota has been exceeded.

", @@ -2421,6 +2603,35 @@ }, "exception":true }, + "SingleMeasureMapping":{ + "type":"structure", + "required":[ + "MeasureValue", + "MeasureValueType", + "MeasureName" + ], + "members":{ + "MeasureValue":{ + "shape":"MeasureValue", + "documentation":"

Dynamic path of the source field to map to the measure in the record.

" + }, + "MeasureValueType":{ + "shape":"MeasureValueType", + "documentation":"

Data type of the source field.

" + }, + "MeasureName":{ + "shape":"MeasureName", + "documentation":"

Target measure name for the measurement attribute in the Timestream table.

" + } + }, + "documentation":"

Maps a single source data field to a single record in the specified Timestream for LiveAnalytics table.

For more information, see Amazon Timestream for LiveAnalytics concepts

" + }, + "SingleMeasureMappings":{ + "type":"list", + "member":{"shape":"SingleMeasureMapping"}, + "max":8192, + "min":0 + }, "Sql":{ "type":"string", "documentation":"

// A single Redshift SQL

", @@ -2454,25 +2665,25 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, + "DesiredState":{ + "shape":"RequestedPipeState", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeState", - "documentation":"

The state the pipe should be in.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" } } }, @@ -2502,25 +2713,25 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, + "DesiredState":{ + "shape":"RequestedPipeState", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeState", - "documentation":"

The state the pipe should be in.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" } } }, @@ -2533,14 +2744,14 @@ "type":"string", "max":1024, "min":1, - "pattern":"^subnet-[0-9a-z]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)$", + "pattern":"subnet-[0-9a-z]*|(\\$(\\.[\\w/_-]+(\\[(\\d+|\\*)\\])*)*)", "sensitive":true }, "SubnetId":{ "type":"string", "max":1024, "min":1, - "pattern":"^subnet-[0-9a-z]*$", + "pattern":"subnet-[0-9a-z]*", "sensitive":true }, "SubnetIds":{ @@ -2631,6 +2842,10 @@ "required":["message"], "members":{ "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

The identifier of the service that caused the exception.

" + }, "quotaCode":{ "shape":"String", "documentation":"

The identifier of the quota that caused the exception.

" @@ -2640,10 +2855,6 @@ "documentation":"

The number of seconds to wait before retrying the action that caused the exception.

", "location":"header", "locationName":"Retry-After" - }, - "serviceCode":{ - "shape":"String", - "documentation":"

The identifier of the service that caused the exception.

" } }, "documentation":"

An action was throttled.

", @@ -2653,12 +2864,29 @@ }, "exception":true }, + "TimeFieldType":{ + "type":"string", + "enum":[ + "EPOCH", + "TIMESTAMP_FORMAT" + ] + }, + "TimeValue":{ + "type":"string", + "max":256, + "min":1 + }, "Timestamp":{"type":"timestamp"}, + "TimestampFormat":{ + "type":"string", + "max":256, + "min":1 + }, "URI":{ "type":"string", "max":200, "min":1, - "pattern":"^[a-zA-Z0-9-\\/*:_+=.@-]*$", + "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*", "sensitive":true }, "UntagResourceRequest":{ @@ -2694,6 +2922,12 @@ "RoleArn" ], "members":{ + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

", + "location":"uri", + "locationName":"Name" + }, "Description":{ "shape":"PipeDescription", "documentation":"

A description of the pipe.

" @@ -2702,6 +2936,10 @@ "shape":"RequestedPipeState", "documentation":"

The state the pipe should be in.

" }, + "SourceParameters":{ + "shape":"UpdatePipeSourceParameters", + "documentation":"

The parameters required to set up a source for your pipe.

" + }, "Enrichment":{ "shape":"OptionalArn", "documentation":"

The ARN of the enrichment resource.

" @@ -2710,24 +2948,6 @@ "shape":"PipeEnrichmentParameters", "documentation":"

The parameters required to set up enrichment on your pipe.

" }, - "LogConfiguration":{ - "shape":"PipeLogConfigurationParameters", - "documentation":"

The logging configuration settings for the pipe.

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

", - "location":"uri", - "locationName":"Name" - }, - "RoleArn":{ - "shape":"RoleArn", - "documentation":"

The ARN of the role that allows the pipe to send data to the target.

" - }, - "SourceParameters":{ - "shape":"UpdatePipeSourceParameters", - "documentation":"

The parameters required to set up a source for your pipe.

" - }, "Target":{ "shape":"Arn", "documentation":"

The ARN of the target resource.

" @@ -2735,6 +2955,14 @@ "TargetParameters":{ "shape":"PipeTargetParameters", "documentation":"

The parameters required to set up a target for your pipe.

For more information about pipe target parameters, including how to use dynamic path parameters, see Target parameters in the Amazon EventBridge User Guide.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that allows the pipe to send data to the target.

" + }, + "LogConfiguration":{ + "shape":"PipeLogConfigurationParameters", + "documentation":"

The logging configuration settings for the pipe.

" } } }, @@ -2745,25 +2973,25 @@ "shape":"PipeArn", "documentation":"

The ARN of the pipe.

" }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

The time the pipe was created.

" + "Name":{ + "shape":"PipeName", + "documentation":"

The name of the pipe.

" + }, + "DesiredState":{ + "shape":"RequestedPipeState", + "documentation":"

The state the pipe should be in.

" }, "CurrentState":{ "shape":"PipeState", "documentation":"

The state the pipe is in.

" }, - "DesiredState":{ - "shape":"RequestedPipeState", - "documentation":"

The state the pipe should be in.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the pipe was created.

" }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

When the pipe was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Name":{ - "shape":"PipeName", - "documentation":"

The name of the pipe.

" } } }, @@ -2771,14 +2999,14 @@ "type":"structure", "required":["Credentials"], "members":{ - "BatchSize":{ - "shape":"LimitMax10000", - "documentation":"

The maximum number of records to include in each batch.

" - }, "Credentials":{ "shape":"MQBrokerAccessCredentials", "documentation":"

The credentials needed to access the resource.

" }, + "BatchSize":{ + "shape":"LimitMax10000", + "documentation":"

The maximum number of records to include in each batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -2797,6 +3025,10 @@ "shape":"DeadLetterConfig", "documentation":"

Define the target queue to send dead-letter queue events to.

" }, + "OnPartialBatchItemFailure":{ + "shape":"OnPartialBatchItemFailureStreams", + "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -2809,10 +3041,6 @@ "shape":"MaximumRetryAttemptsESM", "documentation":"

(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.

" }, - "OnPartialBatchItemFailure":{ - "shape":"OnPartialBatchItemFailureStreams", - "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" - }, "ParallelizationFactor":{ "shape":"LimitMax10", "documentation":"

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" @@ -2831,6 +3059,10 @@ "shape":"DeadLetterConfig", "documentation":"

Define the target queue to send dead-letter queue events to.

" }, + "OnPartialBatchItemFailure":{ + "shape":"OnPartialBatchItemFailureStreams", + "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -2843,10 +3075,6 @@ "shape":"MaximumRetryAttemptsESM", "documentation":"

(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.

" }, - "OnPartialBatchItemFailure":{ - "shape":"OnPartialBatchItemFailureStreams", - "documentation":"

(Streams only) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.

" - }, "ParallelizationFactor":{ "shape":"LimitMax10", "documentation":"

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" @@ -2875,14 +3103,6 @@ "UpdatePipeSourceParameters":{ "type":"structure", "members":{ - "ActiveMQBrokerParameters":{ - "shape":"UpdatePipeSourceActiveMQBrokerParameters", - "documentation":"

The parameters for using an Active MQ broker as a source.

" - }, - "DynamoDBStreamParameters":{ - "shape":"UpdatePipeSourceDynamoDBStreamParameters", - "documentation":"

The parameters for using a DynamoDB stream as a source.

" - }, "FilterCriteria":{ "shape":"FilterCriteria", "documentation":"

The collection of event patterns used to filter events.

To remove a filter, specify a FilterCriteria object with an empty array of Filter objects.

For more information, see Events and Event Patterns in the Amazon EventBridge User Guide.

" @@ -2891,21 +3111,29 @@ "shape":"UpdatePipeSourceKinesisStreamParameters", "documentation":"

The parameters for using a Kinesis stream as a source.

" }, - "ManagedStreamingKafkaParameters":{ - "shape":"UpdatePipeSourceManagedStreamingKafkaParameters", - "documentation":"

The parameters for using an MSK stream as a source.

" + "DynamoDBStreamParameters":{ + "shape":"UpdatePipeSourceDynamoDBStreamParameters", + "documentation":"

The parameters for using a DynamoDB stream as a source.

" + }, + "SqsQueueParameters":{ + "shape":"UpdatePipeSourceSqsQueueParameters", + "documentation":"

The parameters for using an Amazon SQS stream as a source.

" + }, + "ActiveMQBrokerParameters":{ + "shape":"UpdatePipeSourceActiveMQBrokerParameters", + "documentation":"

The parameters for using an Active MQ broker as a source.

" }, "RabbitMQBrokerParameters":{ "shape":"UpdatePipeSourceRabbitMQBrokerParameters", "documentation":"

The parameters for using a Rabbit MQ broker as a source.

" }, + "ManagedStreamingKafkaParameters":{ + "shape":"UpdatePipeSourceManagedStreamingKafkaParameters", + "documentation":"

The parameters for using an MSK stream as a source.

" + }, "SelfManagedKafkaParameters":{ "shape":"UpdatePipeSourceSelfManagedKafkaParameters", - "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

" - }, - "SqsQueueParameters":{ - "shape":"UpdatePipeSourceSqsQueueParameters", - "documentation":"

The parameters for using a Amazon SQS stream as a source.

" + "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } }, "documentation":"

The parameters required to set up a source for your pipe.

" @@ -2914,14 +3142,14 @@ "type":"structure", "required":["Credentials"], "members":{ - "BatchSize":{ - "shape":"LimitMax10000", - "documentation":"

The maximum number of records to include in each batch.

" - }, "Credentials":{ "shape":"MQBrokerAccessCredentials", "documentation":"

The credentials needed to access the resource.

" }, + "BatchSize":{ + "shape":"LimitMax10000", + "documentation":"

The maximum number of records to include in each batch.

" + }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" @@ -2936,14 +3164,14 @@ "shape":"LimitMax10000", "documentation":"

The maximum number of records to include in each batch.

" }, - "Credentials":{ - "shape":"SelfManagedKafkaAccessConfigurationCredentials", - "documentation":"

The credentials needed to access the resource.

" - }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", "documentation":"

The maximum length of a time to wait for events.

" }, + "Credentials":{ + "shape":"SelfManagedKafkaAccessConfigurationCredentials", + "documentation":"

The credentials needed to access the resource.

" + }, "ServerRootCaCertificate":{ "shape":"SecretManagerArn", "documentation":"

The ARN of the Secrets Manager secret used for certification.

" @@ -2953,7 +3181,7 @@ "documentation":"

This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.

" } }, - "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

" + "documentation":"

The parameters for using a self-managed Apache Kafka stream as a source.

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" }, "UpdatePipeSourceSqsQueueParameters":{ "type":"structure", @@ -2972,11 +3200,11 @@ "ValidationException":{ "type":"structure", "members":{ + "message":{"shape":"ErrorMessage"}, "fieldList":{ "shape":"ValidationExceptionFieldList", "documentation":"

The list of fields for which validation failed and the corresponding failure messages.

" - }, - "message":{"shape":"ErrorMessage"} + } }, "documentation":"

Indicates that an error has occurred while performing a validate operation.

", "error":{ @@ -2988,17 +3216,17 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "message", - "name" + "name", + "message" ], "members":{ - "message":{ - "shape":"ErrorMessage", - "documentation":"

The message of the exception.

" - }, "name":{ "shape":"String", "documentation":"

The name of the exception.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The message of the exception.

" } }, "documentation":"

Indicates that an error has occurred while performing a validate operation.

" @@ -3006,8 +3234,12 @@ "ValidationExceptionFieldList":{ "type":"list", "member":{"shape":"ValidationExceptionField"} + }, + "VersionValue":{ + "type":"string", + "max":256, + "min":1 } }, - "documentation":"

Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing event driven architectures. This helps ensure consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.

", - "xmlNamespace":"http://events.amazonaws.com/doc/2015-10-07" + "documentation":"

Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing event driven architectures. This helps ensure consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.

" } diff --git a/botocore/data/pipes/2015-10-07/waiters-2.json b/botocore/data/pipes/2015-10-07/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/pipes/2015-10-07/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index 420e8cbe37..8334cd308f 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2016-06-10", "endpointPrefix":"polly", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Polly", "serviceId":"Polly", "signatureVersion":"v4", - "uid":"polly-2016-06-10" + "uid":"polly-2016-06-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "DeleteLexicon":{ diff --git a/botocore/data/qapps/2023-11-27/endpoint-rule-set-1.json b/botocore/data/qapps/2023-11-27/endpoint-rule-set-1.json new file mode 100644 index 0000000000..3d09620c35 --- /dev/null +++ b/botocore/data/qapps/2023-11-27/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://data.qapps-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://data.qapps-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://data.qapps.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://data.qapps.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/mobile/2017-07-01/paginators-1.json b/botocore/data/qapps/2023-11-27/paginators-1.json similarity index 50% rename from botocore/data/mobile/2017-07-01/paginators-1.json rename to botocore/data/qapps/2023-11-27/paginators-1.json index e86bb7d02b..0d13e6c265 100644 --- a/botocore/data/mobile/2017-07-01/paginators-1.json +++ b/botocore/data/qapps/2023-11-27/paginators-1.json @@ -1,16 +1,16 @@ { "pagination": { - "ListBundles": { - "result_key": "bundleList", - "output_token": "nextToken", + "ListLibraryItems": { "input_token": "nextToken", - "limit_key": "maxResults" - }, - "ListProjects": { - "result_key": "projects", "output_token": "nextToken", + "limit_key": "limit", + "result_key": "libraryItems" + }, + "ListQApps": { "input_token": "nextToken", - "limit_key": "maxResults" + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "apps" } } } diff --git a/botocore/data/qapps/2023-11-27/service-2.json b/botocore/data/qapps/2023-11-27/service-2.json new file mode 100644 index 0000000000..bdda103f4c --- /dev/null +++ b/botocore/data/qapps/2023-11-27/service-2.json @@ -0,0 +1,2659 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-11-27", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"data.qapps", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"QApps", + "serviceId":"QApps", + "signatureVersion":"v4", + "signingName":"qapps", + "uid":"qapps-2023-11-27" + }, + "operations":{ + "AssociateLibraryItemReview":{ + "name":"AssociateLibraryItemReview", + "http":{ + "method":"POST", + "requestUri":"/catalog.associateItemRating", + 
"responseCode":200 + }, + "input":{"shape":"AssociateLibraryItemReviewInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Associates a rating or review for a library item with the user submitting the request. This increments the rating count for the specified library item.

" + }, + "AssociateQAppWithUser":{ + "name":"AssociateQAppWithUser", + "http":{ + "method":"POST", + "requestUri":"/apps.install", + "responseCode":200 + }, + "input":{"shape":"AssociateQAppWithUserInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This operation creates a link between the user's identity calling the operation and a specific Q App. This is useful to mark the Q App as a favorite for the user if the user doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q Apps.

" + }, + "CreateLibraryItem":{ + "name":"CreateLibraryItem", + "http":{ + "method":"POST", + "requestUri":"/catalog.createItem", + "responseCode":200 + }, + "input":{"shape":"CreateLibraryItemInput"}, + "output":{"shape":"CreateLibraryItemOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates a new library item for an Amazon Q App, allowing it to be discovered and used by other allowed users.

" + }, + "CreateQApp":{ + "name":"CreateQApp", + "http":{ + "method":"POST", + "requestUri":"/apps.create", + "responseCode":200 + }, + "input":{"shape":"CreateQAppInput"}, + "output":{"shape":"CreateQAppOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ContentTooLargeException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates a new Amazon Q App based on the provided definition. The Q App definition specifies the cards and flow of the Q App. This operation also calculates the dependencies between the cards by inspecting the references in the prompts.

" + }, + "DeleteLibraryItem":{ + "name":"DeleteLibraryItem", + "http":{ + "method":"POST", + "requestUri":"/catalog.deleteItem", + "responseCode":200 + }, + "input":{"shape":"DeleteLibraryItemInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes a library item for an Amazon Q App, removing it from the library so it can no longer be discovered or used by other users.

", + "idempotent":true + }, + "DeleteQApp":{ + "name":"DeleteQApp", + "http":{ + "method":"POST", + "requestUri":"/apps.delete", + "responseCode":200 + }, + "input":{"shape":"DeleteQAppInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an Amazon Q App owned by the user. If the Q App was previously published to the library, it is also removed from the library.

", + "idempotent":true + }, + "DisassociateLibraryItemReview":{ + "name":"DisassociateLibraryItemReview", + "http":{ + "method":"POST", + "requestUri":"/catalog.disassociateItemRating", + "responseCode":200 + }, + "input":{"shape":"DisassociateLibraryItemReviewInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes a rating or review previously submitted by the user for a library item.

" + }, + "DisassociateQAppFromUser":{ + "name":"DisassociateQAppFromUser", + "http":{ + "method":"POST", + "requestUri":"/apps.uninstall", + "responseCode":200 + }, + "input":{"shape":"DisassociateQAppFromUserInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Disassociates a Q App from a user, removing the user's access to run the Q App.

" + }, + "GetLibraryItem":{ + "name":"GetLibraryItem", + "http":{ + "method":"GET", + "requestUri":"/catalog.getItem", + "responseCode":200 + }, + "input":{"shape":"GetLibraryItemInput"}, + "output":{"shape":"GetLibraryItemOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves details about a library item for an Amazon Q App, including its metadata, categories, ratings, and usage statistics.

" + }, + "GetQApp":{ + "name":"GetQApp", + "http":{ + "method":"GET", + "requestUri":"/apps.get", + "responseCode":200 + }, + "input":{"shape":"GetQAppInput"}, + "output":{"shape":"GetQAppOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the full details of a Q App, including its definition specifying the cards and flow.

" + }, + "GetQAppSession":{ + "name":"GetQAppSession", + "http":{ + "method":"GET", + "requestUri":"/runtime.getQAppSession", + "responseCode":200 + }, + "input":{"shape":"GetQAppSessionInput"}, + "output":{"shape":"GetQAppSessionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the current state and results for an active session of an Amazon Q App.

" + }, + "ImportDocument":{ + "name":"ImportDocument", + "http":{ + "method":"POST", + "requestUri":"/apps.importDocument", + "responseCode":200 + }, + "input":{"shape":"ImportDocumentInput"}, + "output":{"shape":"ImportDocumentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ContentTooLargeException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Uploads a file that can then be used either as a default in a FileUploadCard from Q App definition or as a file that is used inside a single Q App run. The purpose of the document is determined by a scope parameter that indicates whether it is at the app definition level or at the app session level.

" + }, + "ListLibraryItems":{ + "name":"ListLibraryItems", + "http":{ + "method":"GET", + "requestUri":"/catalog.list", + "responseCode":200 + }, + "input":{"shape":"ListLibraryItemsInput"}, + "output":{"shape":"ListLibraryItemsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the library items for Amazon Q Apps that are published and available for users in your Amazon Web Services account.

" + }, + "ListQApps":{ + "name":"ListQApps", + "http":{ + "method":"GET", + "requestUri":"/apps.list", + "responseCode":200 + }, + "input":{"shape":"ListQAppsInput"}, + "output":{"shape":"ListQAppsOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the Amazon Q Apps owned by or associated with the user either because they created it or because they used it from the library in the past. The user identity is extracted from the credentials used to invoke this operation.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceARN}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the tags associated with an Amazon Q Apps resource.

" + }, + "PredictQApp":{ + "name":"PredictQApp", + "http":{ + "method":"POST", + "requestUri":"/apps.predictQApp", + "responseCode":200 + }, + "input":{"shape":"PredictQAppInput"}, + "output":{"shape":"PredictQAppOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Generates an Amazon Q App definition based on either a conversation or a problem statement provided as input. The resulting app definition can be used to call CreateQApp. This API doesn't create Amazon Q Apps directly.

" + }, + "StartQAppSession":{ + "name":"StartQAppSession", + "http":{ + "method":"POST", + "requestUri":"/runtime.startQAppSession", + "responseCode":200 + }, + "input":{"shape":"StartQAppSessionInput"}, + "output":{"shape":"StartQAppSessionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be run.

Each Q App session will be condensed into a single conversation in the web experience.

" + }, + "StopQAppSession":{ + "name":"StopQAppSession", + "http":{ + "method":"POST", + "requestUri":"/runtime.deleteMiniAppRun", + "responseCode":200 + }, + "input":{"shape":"StopQAppSessionInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Stops an active session for an Amazon Q App. This deletes all data related to the session and makes it invalid for future uses. The results of the session will be persisted as part of the conversation.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceARN}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Associates tags with an Amazon Q Apps resource.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceARN}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Disassociates tags from an Amazon Q Apps resource.

", + "idempotent":true + }, + "UpdateLibraryItem":{ + "name":"UpdateLibraryItem", + "http":{ + "method":"POST", + "requestUri":"/catalog.updateItem", + "responseCode":200 + }, + "input":{"shape":"UpdateLibraryItemInput"}, + "output":{"shape":"UpdateLibraryItemOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the metadata and status of a library item for an Amazon Q App.

" + }, + "UpdateQApp":{ + "name":"UpdateQApp", + "http":{ + "method":"POST", + "requestUri":"/apps.update", + "responseCode":200 + }, + "input":{"shape":"UpdateQAppInput"}, + "output":{"shape":"UpdateQAppOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ContentTooLargeException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates an existing Amazon Q App, allowing modifications to its title, description, and definition.

" + }, + "UpdateQAppSession":{ + "name":"UpdateQAppSession", + "http":{ + "method":"POST", + "requestUri":"/runtime.updateQAppSession", + "responseCode":200 + }, + "input":{"shape":"UpdateQAppSessionInput"}, + "output":{"shape":"UpdateQAppSessionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the session for a given Q App sessionId. This is only valid when at least one card of the session is in the WAITING state. Data for each WAITING card can be provided as input. If inputs are not provided, the call will be accepted but session will not move forward. Inputs for cards that are not in the WAITING status will be ignored.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The client is not authorized to perform the requested operation.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "AppArn":{"type":"string"}, + "AppDefinition":{ + "type":"structure", + "required":[ + "appDefinitionVersion", + "cards" + ], + "members":{ + "appDefinitionVersion":{ + "shape":"String", + "documentation":"

The version of the app definition schema or specification.

" + }, + "cards":{ + "shape":"CardModelList", + "documentation":"

The cards that make up the Q App, such as text input, file upload, or query cards.

" + }, + "canEdit":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether the Q App's definition can be edited by the user.

" + } + }, + "documentation":"

The definition of the Q App, specifying the cards and flow.

" + }, + "AppDefinitionInput":{ + "type":"structure", + "required":["cards"], + "members":{ + "cards":{ + "shape":"CardList", + "documentation":"

The cards that make up the Q App definition.

" + }, + "initialPrompt":{ + "shape":"InitialPrompt", + "documentation":"

The initial prompt displayed when the Q App is started.

" + } + }, + "documentation":"

The input for defining an Q App.

" + }, + "AppRequiredCapabilities":{ + "type":"list", + "member":{"shape":"AppRequiredCapability"} + }, + "AppRequiredCapability":{ + "type":"string", + "enum":[ + "FileUpload", + "CreatorMode", + "RetrievalMode", + "PluginMode" + ] + }, + "AppStatus":{ + "type":"string", + "enum":[ + "PUBLISHED", + "DRAFT", + "DELETED" + ] + }, + "AppVersion":{ + "type":"integer", + "box":true, + "max":2147483647, + "min":0 + }, + "AssociateLibraryItemReviewInput":{ + "type":"structure", + "required":[ + "instanceId", + "libraryItemId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier for the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item to associate the review with.

" + } + } + }, + "AssociateQAppWithUserInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The ID of the Amazon Q App to associate with the user.

" + } + } + }, + "AttributeFilter":{ + "type":"structure", + "members":{ + "andAllFilters":{ + "shape":"AttributeFilters", + "documentation":"

Performs a logical AND operation on all supplied filters.

" + }, + "orAllFilters":{ + "shape":"AttributeFilters", + "documentation":"

Performs a logical OR operation on all supplied filters.

" + }, + "notFilter":{ + "shape":"AttributeFilter", + "documentation":"

Performs a logical NOT operation on all supplied filters.

" + }, + "equalsTo":{ + "shape":"DocumentAttribute", + "documentation":"

Performs an equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue, longValue, stringListValue and stringValue.

" + }, + "containsAll":{ + "shape":"DocumentAttribute", + "documentation":"

Returns true when a document contains all the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue.

" + }, + "containsAny":{ + "shape":"DocumentAttribute", + "documentation":"

Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue.

" + }, + "greaterThan":{ + "shape":"DocumentAttribute", + "documentation":"

Performs a greater than operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue.

" + }, + "greaterThanOrEquals":{ + "shape":"DocumentAttribute", + "documentation":"

Performs a greater than or equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue.

" + }, + "lessThan":{ + "shape":"DocumentAttribute", + "documentation":"

Performs a less than operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue.

" + }, + "lessThanOrEquals":{ + "shape":"DocumentAttribute", + "documentation":"

Performs a less than or equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue.

" + } + }, + "documentation":"

The filter criteria used on responses based on document attributes or metadata fields.

" + }, + "AttributeFilters":{ + "type":"list", + "member":{"shape":"AttributeFilter"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Card":{ + "type":"structure", + "members":{ + "textInput":{ + "shape":"TextInputCard", + "documentation":"

A container for the properties of the text input card.

" + }, + "qQuery":{ + "shape":"QQueryCard", + "documentation":"

A container for the properties of the query card.

" + }, + "qPlugin":{ + "shape":"QPluginCard", + "documentation":"

A container for the properties of the plugin card.

" + }, + "fileUpload":{ + "shape":"FileUploadCard", + "documentation":"

A container for the properties of the file upload card.

" + } + }, + "documentation":"

A card representing a component or step in an Amazon Q App's flow.

", + "union":true + }, + "CardInput":{ + "type":"structure", + "members":{ + "textInput":{ + "shape":"TextInputCardInput", + "documentation":"

A container for the properties of the text input card.

" + }, + "qQuery":{ + "shape":"QQueryCardInput", + "documentation":"

A container for the properties of the query input card.

" + }, + "qPlugin":{ + "shape":"QPluginCardInput", + "documentation":"

A container for the properties of the plugin input card.

" + }, + "fileUpload":{ + "shape":"FileUploadCardInput", + "documentation":"

A container for the properties of the file upload input card.

" + } + }, + "documentation":"

The properties defining an input card in an Amazon Q App.

", + "union":true + }, + "CardList":{ + "type":"list", + "member":{"shape":"CardInput"}, + "max":20, + "min":0 + }, + "CardModelList":{ + "type":"list", + "member":{"shape":"Card"}, + "max":20, + "min":0 + }, + "CardOutputSource":{ + "type":"string", + "enum":[ + "approved-sources", + "llm" + ] + }, + "CardStatus":{ + "type":"structure", + "required":[ + "currentState", + "currentValue" + ], + "members":{ + "currentState":{ + "shape":"ExecutionStatus", + "documentation":"

The current state of the card.

" + }, + "currentValue":{ + "shape":"String", + "documentation":"

The current value or result associated with the card.

" + } + }, + "documentation":"

The current status and value of a card in an active Amazon Q App session.

" + }, + "CardStatusMap":{ + "type":"map", + "key":{"shape":"UUID"}, + "value":{"shape":"CardStatus"} + }, + "CardType":{ + "type":"string", + "enum":[ + "text-input", + "q-query", + "file-upload", + "q-plugin" + ] + }, + "CardValue":{ + "type":"structure", + "required":[ + "cardId", + "value" + ], + "members":{ + "cardId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the card.

" + }, + "value":{ + "shape":"CardValueValueString", + "documentation":"

The value or result associated with the card.

" + } + }, + "documentation":"

The value or result associated with a card in a Amazon Q App session.

" + }, + "CardValueList":{ + "type":"list", + "member":{"shape":"CardValue"}, + "max":20, + "min":0 + }, + "CardValueValueString":{ + "type":"string", + "max":5000, + "min":0 + }, + "Category":{ + "type":"structure", + "required":[ + "id", + "title" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the category.

" + }, + "title":{ + "shape":"String", + "documentation":"

The title or name of the category.

" + } + }, + "documentation":"

A category used to classify and filter library items for Amazon Q Apps.

" + }, + "CategoryIdList":{ + "type":"list", + "member":{"shape":"UUID"}, + "max":3, + "min":0 + }, + "CategoryList":{ + "type":"list", + "member":{"shape":"Category"}, + "max":3, + "min":0 + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The unique identifier of the resource

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource

" + } + }, + "documentation":"

The requested operation could not be completed due to a conflict with the current state of the resource.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ContentTooLargeException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The unique identifier of the resource

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource

" + } + }, + "documentation":"

The requested operation could not be completed because the content exceeds the maximum allowed size.

", + "error":{ + "httpStatusCode":413, + "senderFault":true + }, + "exception":true + }, + "ConversationMessage":{ + "type":"structure", + "required":[ + "body", + "type" + ], + "members":{ + "body":{ + "shape":"ConversationMessageBodyString", + "documentation":"

The text content of the conversation message.

" + }, + "type":{ + "shape":"Sender", + "documentation":"

The type of the conversation message.

" + } + }, + "documentation":"

A message in a conversation, used as input for generating an Amazon Q App definition.

" + }, + "ConversationMessageBodyString":{ + "type":"string", + "max":7000, + "min":0 + }, + "CreateLibraryItemInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId", + "appVersion", + "categories" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Amazon Q App to publish to the library.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Amazon Q App to publish to the library.

" + }, + "categories":{ + "shape":"CategoryIdList", + "documentation":"

The categories to associate with the library item for easier discovery.

" + } + } + }, + "CreateLibraryItemOutput":{ + "type":"structure", + "required":[ + "libraryItemId", + "status", + "createdAt", + "createdBy", + "ratingCount" + ], + "members":{ + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the new library item.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the new library item, such as \"Published\".

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who created the library item.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the library item.

" + }, + "ratingCount":{ + "shape":"Integer", + "documentation":"

The number of ratings the library item has received from users.

" + } + } + }, + "CreateQAppInput":{ + "type":"structure", + "required":[ + "instanceId", + "title", + "appDefinition" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "title":{ + "shape":"Title", + "documentation":"

The title of the new Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the new Q App.

" + }, + "appDefinition":{ + "shape":"AppDefinitionInput", + "documentation":"

The definition of the new Q App, specifying the cards and flow.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Optional tags to associate with the new Q App.

" + } + } + }, + "CreateQAppOutput":{ + "type":"structure", + "required":[ + "appId", + "appArn", + "title", + "appVersion", + "status", + "createdAt", + "createdBy", + "updatedAt", + "updatedBy" + ], + "members":{ + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the new Q App.

" + }, + "appArn":{ + "shape":"AppArn", + "documentation":"

The Amazon Resource Name (ARN) of the new Q App.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title of the new Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the new Q App.

" + }, + "initialPrompt":{ + "shape":"InitialPrompt", + "documentation":"

The initial prompt displayed when the Q App is started.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the new Q App.

" + }, + "status":{ + "shape":"AppStatus", + "documentation":"

The status of the new Q App, such as \"Created\".

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who created the Q App.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the Q App.

" + }, + "requiredCapabilities":{ + "shape":"AppRequiredCapabilities", + "documentation":"

The capabilities required to run the Q App, such as file upload or third-party integrations.

" + } + } + }, + "Default":{ + "type":"string", + "max":500, + "min":0 + }, + "DeleteLibraryItemInput":{ + "type":"structure", + "required":[ + "instanceId", + "libraryItemId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item to delete.

" + } + } + }, + "DeleteQAppInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App to delete.

" + } + } + }, + "DependencyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Description":{ + "type":"string", + "max":500, + "min":0 + }, + "DisassociateLibraryItemReviewInput":{ + "type":"structure", + "required":[ + "instanceId", + "libraryItemId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item to remove the review from.

" + } + } + }, + "DisassociateQAppFromUserInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App to disassociate from the user.

" + } + } + }, + "DocumentAttribute":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"DocumentAttributeKey", + "documentation":"

The identifier for the attribute.

" + }, + "value":{ + "shape":"DocumentAttributeValue", + "documentation":"

The value of the attribute.

" + } + }, + "documentation":"

A document attribute or metadata field.

" + }, + "DocumentAttributeKey":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9_][a-zA-Z0-9_-]*" + }, + "DocumentAttributeStringListValue":{ + "type":"list", + "member":{"shape":"PlatoString"} + }, + "DocumentAttributeValue":{ + "type":"structure", + "members":{ + "stringValue":{ + "shape":"DocumentAttributeValueStringValueString", + "documentation":"

A string.

" + }, + "stringListValue":{ + "shape":"DocumentAttributeStringListValue", + "documentation":"

A list of strings.

" + }, + "longValue":{ + "shape":"Long", + "documentation":"

A long integer value.

" + }, + "dateValue":{ + "shape":"Timestamp", + "documentation":"

A date expressed as an ISO 8601 string.

It's important for the time zone to be included in the ISO 8601 date-time format. For example, 2012-03-25T12:30:10+01:00 is the ISO 8601 date-time format for March 25th 2012 at 12:30PM (plus 10 seconds) in Central European Time.

" + } + }, + "documentation":"

The value of a document attribute. You can only provide one value for a document attribute.

", + "union":true + }, + "DocumentAttributeValueStringValueString":{ + "type":"string", + "max":2048, + "min":0 + }, + "DocumentScope":{ + "type":"string", + "enum":[ + "APPLICATION", + "SESSION" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "WAITING", + "COMPLETED" + ] + }, + "FileUploadCard":{ + "type":"structure", + "required":[ + "id", + "title", + "dependencies", + "type" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the file upload card.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title of the file upload card.

" + }, + "dependencies":{ + "shape":"DependencyList", + "documentation":"

Any dependencies or requirements for the file upload card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "filename":{ + "shape":"String", + "documentation":"

The name of the file being uploaded.

" + }, + "fileId":{ + "shape":"String", + "documentation":"

The unique identifier of the file associated with the card.

" + }, + "allowOverride":{ + "shape":"Boolean", + "documentation":"

A flag indicating if the user can override the default file for the upload card.

" + } + }, + "documentation":"

A card in an Amazon Q App that allows the user to upload a file.

" + }, + "FileUploadCardInput":{ + "type":"structure", + "required":[ + "title", + "id", + "type" + ], + "members":{ + "title":{ + "shape":"Title", + "documentation":"

The title or label of the file upload card.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the file upload card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "filename":{ + "shape":"Filename", + "documentation":"

The default filename to use for the file upload card.

" + }, + "fileId":{ + "shape":"UUID", + "documentation":"

The identifier of a pre-uploaded file associated with the card.

" + }, + "allowOverride":{ + "shape":"Boolean", + "documentation":"

A flag indicating if the user can override the default file for the upload card.

" + } + }, + "documentation":"

Represents a file upload card. It can optionally receive a filename and fileId to set a default file. If not received, the user must provide the file when the Q App runs.

" + }, + "Filename":{ + "type":"string", + "max":100, + "min":0 + }, + "GetLibraryItemInput":{ + "type":"structure", + "required":[ + "instanceId", + "libraryItemId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item to retrieve.

", + "location":"querystring", + "locationName":"libraryItemId" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Amazon Q App associated with the library item.

", + "location":"querystring", + "locationName":"appId" + } + } + }, + "GetLibraryItemOutput":{ + "type":"structure", + "required":[ + "libraryItemId", + "appId", + "appVersion", + "categories", + "status", + "createdAt", + "createdBy", + "ratingCount" + ], + "members":{ + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item.

" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App associated with the library item.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Q App associated with the library item.

" + }, + "categories":{ + "shape":"CategoryList", + "documentation":"

The categories associated with the library item for discovery.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the library item, such as \"Published\".

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who created the library item.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the library item.

" + }, + "ratingCount":{ + "shape":"Integer", + "documentation":"

The number of ratings the library item has received from users.

" + }, + "isRatedByUser":{ + "shape":"Boolean", + "documentation":"

Whether the current user has rated the library item.

" + }, + "userCount":{ + "shape":"Integer", + "documentation":"

The number of users who have associated the Q App with their account.

" + } + } + }, + "GetQAppInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App to retrieve.

", + "location":"querystring", + "locationName":"appId" + } + } + }, + "GetQAppOutput":{ + "type":"structure", + "required":[ + "appId", + "appArn", + "title", + "appVersion", + "status", + "createdAt", + "createdBy", + "updatedAt", + "updatedBy", + "appDefinition" + ], + "members":{ + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App.

" + }, + "appArn":{ + "shape":"AppArn", + "documentation":"

The Amazon Resource Name (ARN) of the Q App.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title of the Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the Q App.

" + }, + "initialPrompt":{ + "shape":"InitialPrompt", + "documentation":"

The initial prompt displayed when the Q App is started.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Q App.

" + }, + "status":{ + "shape":"AppStatus", + "documentation":"

The status of the Q App.

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who created the Q App.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the Q App.

" + }, + "requiredCapabilities":{ + "shape":"AppRequiredCapabilities", + "documentation":"

The capabilities required to run the Q App, such as file upload or third-party integrations.

" + }, + "appDefinition":{ + "shape":"AppDefinition", + "documentation":"

The full definition of the Q App, specifying the cards and flow.

" + } + } + }, + "GetQAppSessionInput":{ + "type":"structure", + "required":[ + "instanceId", + "sessionId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "sessionId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App session to retrieve.

", + "location":"querystring", + "locationName":"sessionId" + } + } + }, + "GetQAppSessionOutput":{ + "type":"structure", + "required":[ + "sessionId", + "sessionArn", + "status", + "cardStatus" + ], + "members":{ + "sessionId":{ + "shape":"String", + "documentation":"

The unique identifier of the Q App session.

" + }, + "sessionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Q App session.

" + }, + "status":{ + "shape":"ExecutionStatus", + "documentation":"

The current status of the Q App session.

" + }, + "cardStatus":{ + "shape":"CardStatusMap", + "documentation":"

The current status for each card in the Q App session.

" + } + } + }, + "ImportDocumentInput":{ + "type":"structure", + "required":[ + "instanceId", + "cardId", + "appId", + "fileContentsBase64", + "fileName", + "scope" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "cardId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the card the file is associated with, if applicable.

" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App the file is associated with.

" + }, + "fileContentsBase64":{ + "shape":"String", + "documentation":"

The base64-encoded contents of the file to upload.

" + }, + "fileName":{ + "shape":"Filename", + "documentation":"

The name of the file being uploaded.

" + }, + "scope":{ + "shape":"DocumentScope", + "documentation":"

Whether the file is associated with a Q App definition or a specific Q App session.

" + }, + "sessionId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App session the file is associated with, if applicable.

" + } + } + }, + "ImportDocumentOutput":{ + "type":"structure", + "members":{ + "fileId":{ + "shape":"String", + "documentation":"

The unique identifier assigned to the uploaded file.

" + } + } + }, + "InitialPrompt":{ + "type":"string", + "max":10000, + "min":0 + }, + "InstanceId":{"type":"string"}, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to wait before retrying the operation

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

An internal service error occurred while processing the request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "LibraryItemList":{ + "type":"list", + "member":{"shape":"LibraryItemMember"} + }, + "LibraryItemMember":{ + "type":"structure", + "required":[ + "libraryItemId", + "appId", + "appVersion", + "categories", + "status", + "createdAt", + "createdBy", + "ratingCount" + ], + "members":{ + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item.

" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App associated with the library item.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Q App associated with the library item.

" + }, + "categories":{ + "shape":"CategoryList", + "documentation":"

The categories associated with the library item.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the library item.

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who created the library item.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the library item.

" + }, + "ratingCount":{ + "shape":"Integer", + "documentation":"

The number of ratings the library item has received.

" + }, + "isRatedByUser":{ + "shape":"Boolean", + "documentation":"

Whether the current user has rated the library item.

" + }, + "userCount":{ + "shape":"Integer", + "documentation":"

The number of users who have associated the Q App with their account.

" + } + }, + "documentation":"

A library item is a snapshot of an Amazon Q App that can be published so the users in their Amazon Q Apps library can discover it, clone it, and run it.

" + }, + "LibraryItemStatus":{ + "type":"string", + "enum":[ + "PUBLISHED", + "DISABLED" + ] + }, + "ListLibraryItemsInput":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "limit":{ + "shape":"PageLimit", + "documentation":"

The maximum number of library items to return in the response.

", + "location":"querystring", + "locationName":"limit" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to request the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "categoryId":{ + "shape":"UUID", + "documentation":"

Optional category to filter the library items by.

", + "location":"querystring", + "locationName":"categoryId" + } + } + }, + "ListLibraryItemsOutput":{ + "type":"structure", + "members":{ + "libraryItems":{ + "shape":"LibraryItemList", + "documentation":"

The list of library items meeting the request criteria.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token to use to request the next page of results.

" + } + } + }, + "ListQAppsInput":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "limit":{ + "shape":"PageLimit", + "documentation":"

The maximum number of Q Apps to return in the response.

", + "location":"querystring", + "locationName":"limit" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to request the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListQAppsOutput":{ + "type":"structure", + "required":["apps"], + "members":{ + "apps":{ + "shape":"UserAppsList", + "documentation":"

The list of Amazon Q Apps meeting the request criteria.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token to use to request the next page of results.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceARN"], + "members":{ + "resourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags should be listed.

", + "location":"uri", + "locationName":"resourceARN" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

The list of tags that are assigned to the resource.

" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "PageLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":300, + "min":0 + }, + "Placeholder":{ + "type":"string", + "max":500, + "min":0 + }, + "PlatoString":{ + "type":"string", + "max":2048, + "min":1 + }, + "PluginId":{ + "type":"string", + "max":36, + "min":36 + }, + "PluginType":{ + "type":"string", + "enum":[ + "SERVICE_NOW", + "SALESFORCE", + "JIRA", + "ZENDESK", + "CUSTOM" + ] + }, + "PredictAppDefinition":{ + "type":"structure", + "required":[ + "title", + "appDefinition" + ], + "members":{ + "title":{ + "shape":"Title", + "documentation":"

The title of the generated Q App definition.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the generated Q App definition.

" + }, + "appDefinition":{ + "shape":"AppDefinitionInput", + "documentation":"

The definition specifying the cards and flow of the generated Q App.

" + } + }, + "documentation":"

The definition of an Amazon Q App generated based on input such as a conversation or problem statement.

" + }, + "PredictQAppInput":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "options":{ + "shape":"PredictQAppInputOptions", + "documentation":"

The input to generate the Q App definition from, either a conversation or problem statement.

" + } + } + }, + "PredictQAppInputOptions":{ + "type":"structure", + "members":{ + "conversation":{ + "shape":"PredictQAppInputOptionsConversationList", + "documentation":"

A conversation to use as input for generating the Q App definition.

" + }, + "problemStatement":{ + "shape":"PredictQAppInputOptionsProblemStatementString", + "documentation":"

A problem statement to use as input for generating the Q App definition.

" + } + }, + "documentation":"

The input options for generating a Q App definition.

", + "union":true + }, + "PredictQAppInputOptionsConversationList":{ + "type":"list", + "member":{"shape":"ConversationMessage"}, + "max":25, + "min":1 + }, + "PredictQAppInputOptionsProblemStatementString":{ + "type":"string", + "max":10000, + "min":0 + }, + "PredictQAppOutput":{ + "type":"structure", + "required":[ + "app", + "problemStatement" + ], + "members":{ + "app":{ + "shape":"PredictAppDefinition", + "documentation":"

The generated Q App definition.

" + }, + "problemStatement":{ + "shape":"String", + "documentation":"

The problem statement extracted from the input conversation, if provided.

" + } + } + }, + "Prompt":{ + "type":"string", + "max":7000, + "min":0 + }, + "QAppsTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "QPluginCard":{ + "type":"structure", + "required":[ + "id", + "title", + "dependencies", + "type", + "prompt", + "pluginType", + "pluginId" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the plugin card.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title or label of the plugin card.

" + }, + "dependencies":{ + "shape":"DependencyList", + "documentation":"

Any dependencies or requirements for the plugin card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "prompt":{ + "shape":"Prompt", + "documentation":"

The prompt or instructions displayed for the plugin card.

" + }, + "pluginType":{ + "shape":"PluginType", + "documentation":"

The type or category of the plugin used by the card.

" + }, + "pluginId":{ + "shape":"String", + "documentation":"

The unique identifier of the plugin used by the card.

" + } + }, + "documentation":"

A card in a Q App that integrates with a third-party plugin or service.

" + }, + "QPluginCardInput":{ + "type":"structure", + "required":[ + "title", + "id", + "type", + "prompt", + "pluginId" + ], + "members":{ + "title":{ + "shape":"Title", + "documentation":"

The title or label of the plugin card.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the plugin card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "prompt":{ + "shape":"Prompt", + "documentation":"

The prompt or instructions displayed for the plugin card.

" + }, + "pluginId":{ + "shape":"PluginId", + "documentation":"

The unique identifier of the plugin used by the card.

" + } + }, + "documentation":"

The input shape for defining a plugin card in an Amazon Q App.

" + }, + "QQueryCard":{ + "type":"structure", + "required":[ + "id", + "title", + "dependencies", + "type", + "prompt", + "outputSource" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the query card.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title or label of the query card.

" + }, + "dependencies":{ + "shape":"DependencyList", + "documentation":"

Any dependencies or requirements for the query card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "prompt":{ + "shape":"Prompt", + "documentation":"

The prompt or instructions displayed for the query card.

" + }, + "outputSource":{ + "shape":"CardOutputSource", + "documentation":"

The source or type of output generated by the query card.

" + }, + "attributeFilter":{ + "shape":"AttributeFilter", + "documentation":"

The Amazon Q Business filters applied in this query card when resolving data sources.

" + } + }, + "documentation":"

A card in an Amazon Q App that generates a response based on the Amazon Q Business service.

" + }, + "QQueryCardInput":{ + "type":"structure", + "required":[ + "title", + "id", + "type", + "prompt" + ], + "members":{ + "title":{ + "shape":"Title", + "documentation":"

The title or label of the query card.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the query card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "prompt":{ + "shape":"Prompt", + "documentation":"

The prompt or instructions displayed for the query card.

" + }, + "outputSource":{ + "shape":"CardOutputSource", + "documentation":"

The source or type of output to generate for the query card.

" + }, + "attributeFilter":{ + "shape":"AttributeFilter", + "documentation":"

Turns on filtering of responses based on document attributes or metadata fields.

" + } + }, + "documentation":"

The input shape for defining a query card in an Amazon Q App.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The unique identifier of the resource

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource

" + } + }, + "documentation":"

The requested resource could not be found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Sender":{ + "type":"string", + "enum":[ + "USER", + "SYSTEM" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The unique identifier of the resource

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

The code for the service where the quota was exceeded

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

The code of the quota that was exceeded

" + } + }, + "documentation":"

The requested operation could not be completed because it would exceed the service's quota or limit.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "StartQAppSessionInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId", + "appVersion" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App to start a session for.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Q App to use for the session.

" + }, + "initialValues":{ + "shape":"CardValueList", + "documentation":"

Optional initial input values to provide for the Q App session.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Optional tags to associate with the new Q App session.

" + } + } + }, + "StartQAppSessionOutput":{ + "type":"structure", + "required":[ + "sessionId", + "sessionArn" + ], + "members":{ + "sessionId":{ + "shape":"String", + "documentation":"

The unique identifier of the new Q App session.

" + }, + "sessionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the new Q App session.

" + } + } + }, + "StopQAppSessionInput":{ + "type":"structure", + "required":[ + "instanceId", + "sessionId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "sessionId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App session to stop.

" + } + } + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceARN", + "tags" + ], + "members":{ + "resourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource to tag.

", + "location":"uri", + "locationName":"resourceARN" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags to associate with the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TextInputCard":{ + "type":"structure", + "required":[ + "id", + "title", + "dependencies", + "type" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the text input card.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title or label of the text input card.

" + }, + "dependencies":{ + "shape":"DependencyList", + "documentation":"

Any dependencies or requirements for the text input card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "placeholder":{ + "shape":"Placeholder", + "documentation":"

The placeholder text to display in the text input field.

" + }, + "defaultValue":{ + "shape":"Default", + "documentation":"

The default value to pre-populate in the text input field.

" + } + }, + "documentation":"

A card in an Amazon Q App that allows the user to input text.

" + }, + "TextInputCardInput":{ + "type":"structure", + "required":[ + "title", + "id", + "type" + ], + "members":{ + "title":{ + "shape":"Title", + "documentation":"

The title or label of the text input card.

" + }, + "id":{ + "shape":"UUID", + "documentation":"

The unique identifier of the text input card.

" + }, + "type":{ + "shape":"CardType", + "documentation":"

The type of the card.

" + }, + "placeholder":{ + "shape":"Placeholder", + "documentation":"

The placeholder text to display in the text input field.

" + }, + "defaultValue":{ + "shape":"Default", + "documentation":"

The default value to pre-populate in the text input field.

" + } + }, + "documentation":"

The input shape for defining a text input card in an Amazon Q App.

" + }, + "ThrottlingException":{ + "type":"structure", + "required":[ + "message", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

The code for the service where the quota was exceeded

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

The code of the quota that was exceeded

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to wait before retrying the operation

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The requested operation could not be completed because too many requests were sent at once. Wait a bit and try again later.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "Title":{ + "type":"string", + "max":100, + "min":0 + }, + "UUID":{ + "type":"string", + "pattern":"[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}" + }, + "UnauthorizedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The client is not authenticated or authorized to perform the requested operation.

", + "error":{ + "httpStatusCode":401, + "senderFault":true + }, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceARN", + "tagKeys" + ], + "members":{ + "resourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource to disassociate the tag from.

", + "location":"uri", + "locationName":"resourceARN" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

The keys of the tags to disassociate from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLibraryItemInput":{ + "type":"structure", + "required":[ + "instanceId", + "libraryItemId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the library item to update.

" + }, + "status":{ + "shape":"LibraryItemStatus", + "documentation":"

The new status to set for the library item, such as \"Published\" or \"Hidden\".

" + }, + "categories":{ + "shape":"CategoryIdList", + "documentation":"

The new categories to associate with the library item.

" + } + } + }, + "UpdateLibraryItemOutput":{ + "type":"structure", + "required":[ + "libraryItemId", + "appId", + "appVersion", + "categories", + "status", + "createdAt", + "createdBy", + "ratingCount" + ], + "members":{ + "libraryItemId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the updated library item.

" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App associated with the library item.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The version of the Q App associated with the library item.

" + }, + "categories":{ + "shape":"CategoryList", + "documentation":"

The categories associated with the updated library item.

" + }, + "status":{ + "shape":"String", + "documentation":"

The new status of the updated library item.

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was originally created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who originally created the library item.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the library item was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the library item.

" + }, + "ratingCount":{ + "shape":"Integer", + "documentation":"

The number of ratings the library item has received.

" + }, + "isRatedByUser":{ + "shape":"Boolean", + "documentation":"

Whether the current user has rated the library item.

" + }, + "userCount":{ + "shape":"Integer", + "documentation":"

The number of users who have the associated Q App.

" + } + } + }, + "UpdateQAppInput":{ + "type":"structure", + "required":[ + "instanceId", + "appId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App to update.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The new title for the Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The new description for the Q App.

" + }, + "appDefinition":{ + "shape":"AppDefinitionInput", + "documentation":"

The new definition specifying the cards and flow for the Q App.

" + } + } + }, + "UpdateQAppOutput":{ + "type":"structure", + "required":[ + "appId", + "appArn", + "title", + "appVersion", + "status", + "createdAt", + "createdBy", + "updatedAt", + "updatedBy" + ], + "members":{ + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the updated Q App.

" + }, + "appArn":{ + "shape":"AppArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated Q App.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The new title of the updated Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The new description of the updated Q App.

" + }, + "initialPrompt":{ + "shape":"InitialPrompt", + "documentation":"

The initial prompt for the updated Q App.

" + }, + "appVersion":{ + "shape":"AppVersion", + "documentation":"

The new version of the updated Q App.

" + }, + "status":{ + "shape":"AppStatus", + "documentation":"

The status of the updated Q App.

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was originally created.

" + }, + "createdBy":{ + "shape":"String", + "documentation":"

The user who originally created the Q App.

" + }, + "updatedAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the Q App was last updated.

" + }, + "updatedBy":{ + "shape":"String", + "documentation":"

The user who last updated the Q App.

" + }, + "requiredCapabilities":{ + "shape":"AppRequiredCapabilities", + "documentation":"

The capabilities required for the updated Q App.

" + } + } + }, + "UpdateQAppSessionInput":{ + "type":"structure", + "required":[ + "instanceId", + "sessionId" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"

The unique identifier of the Amazon Q Business application environment instance.

", + "location":"header", + "locationName":"instance-id" + }, + "sessionId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App session to provide input for.

" + }, + "values":{ + "shape":"CardValueList", + "documentation":"

The input values to provide for the current state of the Q App session.

" + } + } + }, + "UpdateQAppSessionOutput":{ + "type":"structure", + "required":[ + "sessionId", + "sessionArn" + ], + "members":{ + "sessionId":{ + "shape":"String", + "documentation":"

The unique identifier of the updated Q App session.

" + }, + "sessionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the updated Q App session.

" + } + } + }, + "UserAppItem":{ + "type":"structure", + "required":[ + "appId", + "appArn", + "title", + "createdAt" + ], + "members":{ + "appId":{ + "shape":"UUID", + "documentation":"

The unique identifier of the Q App.

" + }, + "appArn":{ + "shape":"AppArn", + "documentation":"

The Amazon Resource Name (ARN) of the Q App.

" + }, + "title":{ + "shape":"Title", + "documentation":"

The title of the Q App.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the Q App.

" + }, + "createdAt":{ + "shape":"QAppsTimestamp", + "documentation":"

The date and time the user's association with the Q App was created.

" + }, + "canEdit":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether the user can edit the Q App.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the user's association with the Q App.

" + } + }, + "documentation":"

An Amazon Q App associated with a user, either owned by the user or favorited.

" + }, + "UserAppsList":{ + "type":"list", + "member":{"shape":"UserAppItem"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The input failed to satisfy the constraints specified by the service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create a Q App that exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for marketing content-generation like writing customer emails and creating promotional content using a certain style of voice, tone, and branding. For more information, see Amazon Q App in the Amazon Q Business User Guide.

" +} diff --git a/botocore/data/qapps/2023-11-27/waiters-2.json b/botocore/data/qapps/2023-11-27/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/qapps/2023-11-27/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/qbusiness/2023-11-27/service-2.json b/botocore/data/qbusiness/2023-11-27/service-2.json index b36507361a..3815eabe3c 100644 --- a/botocore/data/qbusiness/2023-11-27/service-2.json +++ b/botocore/data/qbusiness/2023-11-27/service-2.json @@ -2,6 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-11-27", + "auth":["aws.auth#sigv4"], "endpointPrefix":"qbusiness", "protocol":"rest-json", "protocolSettings":{"h2":"eventstream"}, @@ -110,7 +111,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates an Amazon Q Business application.

There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users.

", + "documentation":"

Creates an Amazon Q Business application.

There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users.

", "idempotent":true }, "CreateDataSource":{ @@ -1562,7 +1563,7 @@ }, "containsAny":{ "shape":"DocumentAttribute", - "documentation":"

Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: dateValue, longValue, stringListValue and stringValue.

" + "documentation":"

Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue.

" }, "greaterThan":{ "shape":"DocumentAttribute", @@ -1995,7 +1996,7 @@ }, "parentMessageId":{ "shape":"MessageId", - "documentation":"

The identifier of the previous end user text input message in a conversation.

" + "documentation":"

The identifier of the previous system message in a conversation.

" }, "attributeFilter":{ "shape":"AttributeFilter", @@ -2201,6 +2202,14 @@ "attachmentsConfiguration":{ "shape":"AttachmentsConfiguration", "documentation":"

An option to allow end users to upload files directly during chat.

" + }, + "qAppsConfiguration":{ + "shape":"QAppsConfiguration", + "documentation":"

An option to allow end users to create and use Amazon Q Apps in the web experience.

" + }, + "personalizationConfiguration":{ + "shape":"PersonalizationConfiguration", + "documentation":"

Configuration information about chat response personalization. For more information, see Personalizing chat responses

" } } }, @@ -2306,7 +2315,7 @@ }, "type":{ "shape":"IndexType", - "documentation":"

The index type that's suitable for your needs. For more information on what's included in each type of index or index tier, see Amazon Q Business tiers.

" + "documentation":"

The index type that's suitable for your needs. For more information on what's included in each type of index, see Amazon Q Business tiers.

" }, "description":{ "shape":"Description", @@ -3557,6 +3566,14 @@ "attachmentsConfiguration":{ "shape":"AppliedAttachmentsConfiguration", "documentation":"

Settings for whether end users can upload files directly during chat.

" + }, + "qAppsConfiguration":{ + "shape":"QAppsConfiguration", + "documentation":"

Settings for whether end users can create and use Amazon Q Apps in the web experience.

" + }, + "personalizationConfiguration":{ + "shape":"PersonalizationConfiguration", + "documentation":"

Configuration information about chat response personalization. For more information, see Personalizing chat responses.

" } } }, @@ -5217,6 +5234,24 @@ "type":"string", "sensitive":true }, + "PersonalizationConfiguration":{ + "type":"structure", + "required":["personalizationControlMode"], + "members":{ + "personalizationControlMode":{ + "shape":"PersonalizationControlMode", + "documentation":"

An option to allow Amazon Q Business to customize chat responses using user-specific metadata—specifically, location and job information—in your IAM Identity Center instance.

" + } + }, + "documentation":"

Configuration information about chat response personalization. For more information, see Personalizing chat responses.

" + }, + "PersonalizationControlMode":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Plugin":{ "type":"structure", "members":{ @@ -5482,6 +5517,24 @@ "members":{ } }, + "QAppsConfiguration":{ + "type":"structure", + "required":["qAppsControlMode"], + "members":{ + "qAppsControlMode":{ + "shape":"QAppsControlMode", + "documentation":"

Status information about whether end users can create and use Amazon Q Apps in the web experience.

" + } + }, + "documentation":"

Configuration information about Amazon Q Apps. (preview feature)

" + }, + "QAppsControlMode":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "ReadAccessType":{ "type":"string", "enum":[ @@ -6219,6 +6272,14 @@ "attachmentsConfiguration":{ "shape":"AttachmentsConfiguration", "documentation":"

An option to allow end users to upload files directly during chat.

" + }, + "qAppsConfiguration":{ + "shape":"QAppsConfiguration", + "documentation":"

An option to allow end users to create and use Amazon Q Apps in the web experience.

" + }, + "personalizationConfiguration":{ + "shape":"PersonalizationConfiguration", + "documentation":"

Configuration information about chat response personalization. For more information, see Personalizing chat responses.

" } } }, @@ -6746,5 +6807,5 @@ "member":{"shape":"WebExperience"} } }, - "documentation":"

This is the Amazon Q Business API Reference. Amazon Q Business is a fully managed, generative-AI powered enterprise chat assistant that you can deploy within your organization. Amazon Q Business enhances employee productivity by supporting key tasks such as question-answering, knowledge discovery, writing email messages, summarizing text, drafting document outlines, and brainstorming ideas. Users ask questions of Amazon Q Business and get answers that are presented in a conversational manner. For an introduction to the service, see the Amazon Q Business User Guide .

For an overview of the Amazon Q Business APIs, see Overview of Amazon Q Business API operations.

For information about the IAM access control permissions you need to use this API, see IAM roles for Amazon Q Business in the Amazon Q Business User Guide.

You can use the following AWS SDKs to access Amazon Q Business APIs:

The following resources provide additional information about using the Amazon Q Business API:

" + "documentation":"

This is the Amazon Q Business API Reference. Amazon Q Business is a fully managed, generative-AI powered enterprise chat assistant that you can deploy within your organization. Amazon Q Business enhances employee productivity by supporting key tasks such as question-answering, knowledge discovery, writing email messages, summarizing text, drafting document outlines, and brainstorming ideas. Users ask questions of Amazon Q Business and get answers that are presented in a conversational manner. For an introduction to the service, see the Amazon Q Business User Guide .

For an overview of the Amazon Q Business APIs, see Overview of Amazon Q Business API operations.

For information about the IAM access control permissions you need to use this API, see IAM roles for Amazon Q Business in the Amazon Q Business User Guide.

The following resources provide additional information about using the Amazon Q Business API:

" } diff --git a/botocore/data/qconnect/2020-10-19/paginators-1.json b/botocore/data/qconnect/2020-10-19/paginators-1.json index 2d69a26956..6ef15569f0 100644 --- a/botocore/data/qconnect/2020-10-19/paginators-1.json +++ b/botocore/data/qconnect/2020-10-19/paginators-1.json @@ -59,6 +59,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "sessionSummaries" + }, + "ListContentAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "contentAssociationSummaries" } } } diff --git a/botocore/data/qconnect/2020-10-19/service-2.json b/botocore/data/qconnect/2020-10-19/service-2.json index ca9cee52a0..15c7ce6101 100644 --- a/botocore/data/qconnect/2020-10-19/service-2.json +++ b/botocore/data/qconnect/2020-10-19/service-2.json @@ -68,6 +68,25 @@ "documentation":"

Creates Amazon Q in Connect content. Before calling this API, use StartContentUpload to upload an asset.

", "idempotent":true }, + "CreateContentAssociation":{ + "name":"CreateContentAssociation", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/associations", + "responseCode":200 + }, + "input":{"shape":"CreateContentAssociationRequest"}, + "output":{"shape":"CreateContentAssociationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates an association between a content resource in a knowledge base and step-by-step guides. Step-by-step guides offer instructions to agents for resolving common customer issues. You create a content association to integrate Amazon Q in Connect and step-by-step guides.

After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a recommendation to an agent based on the intent that it's detected, it also provides them with the option to start the step-by-step guide that you have associated with the content.

Note the following limitations:

  • You can create only one content association for each content resource in a knowledge base.

  • You can associate a step-by-step guide with multiple content resources.

For more information, see Integrate Amazon Q in Connect with step-by-step guides in the Amazon Connect Administrator Guide.

" + }, "CreateKnowledgeBase":{ "name":"CreateKnowledgeBase", "http":{ @@ -174,6 +193,23 @@ "documentation":"

Deletes the content.

", "idempotent":true }, + "DeleteContentAssociation":{ + "name":"DeleteContentAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/associations/{contentAssociationId}", + "responseCode":204 + }, + "input":{"shape":"DeleteContentAssociationRequest"}, + "output":{"shape":"DeleteContentAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the content association.

For more information about content associations--what they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in the Amazon Connect Administrator Guide.

", + "idempotent":true + }, "DeleteImportJob":{ "name":"DeleteImportJob", "http":{ @@ -275,6 +311,22 @@ ], "documentation":"

Retrieves content, including a pre-signed URL to download the content.

" }, + "GetContentAssociation":{ + "name":"GetContentAssociation", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/associations/{contentAssociationId}", + "responseCode":200 + }, + "input":{"shape":"GetContentAssociationRequest"}, + "output":{"shape":"GetContentAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the content association.

For more information about content associations--what they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in the Amazon Connect Administrator Guide.

" + }, "GetContentSummary":{ "name":"GetContentSummary", "http":{ @@ -404,6 +456,22 @@ ], "documentation":"

Lists information about assistants.

" }, + "ListContentAssociations":{ + "name":"ListContentAssociations", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/associations", + "responseCode":200 + }, + "input":{"shape":"ListContentAssociationsRequest"}, + "output":{"shape":"ListContentAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the content associations.

For more information about content associations--what they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in the Amazon Connect Administrator Guide.

" + }, "ListContents":{ "name":"ListContents", "http":{ @@ -745,6 +813,16 @@ }, "exception":true }, + "AmazonConnectGuideAssociationData":{ + "type":"structure", + "members":{ + "flowId":{ + "shape":"GenericArn", + "documentation":"

The Amazon Resource Name (ARN) of an Amazon Connect flow. Step-by-step guides are a type of flow.

" + } + }, + "documentation":"

Content association data for a step-by-step guide.

" + }, "AndConditions":{ "type":"list", "member":{"shape":"TagCondition"} @@ -766,7 +844,7 @@ }, "Arn":{ "type":"string", - "pattern":"^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})?$" + "pattern":"^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" }, "AssistantAssociationData":{ "type":"structure", @@ -1099,6 +1177,129 @@ "value":{"shape":"ContactAttributeValue"}, "sensitive":true }, + "ContentAssociationContents":{ + "type":"structure", + "members":{ + "amazonConnectGuideAssociation":{ + "shape":"AmazonConnectGuideAssociationData", + "documentation":"

The data of the step-by-step guide association.

" + } + }, + "documentation":"

The contents of a content association.

", + "union":true + }, + "ContentAssociationData":{ + "type":"structure", + "required":[ + "associationData", + "associationType", + "contentArn", + "contentAssociationArn", + "contentAssociationId", + "contentId", + "knowledgeBaseArn", + "knowledgeBaseId" + ], + "members":{ + "associationData":{ + "shape":"ContentAssociationContents", + "documentation":"

The content association.

" + }, + "associationType":{ + "shape":"ContentAssociationType", + "documentation":"

The type of association.

" + }, + "contentArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content.

" + }, + "contentAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content association.

" + }, + "contentAssociationId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

" + }, + "contentId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Information about the content association.

" + }, + "ContentAssociationSummary":{ + "type":"structure", + "required":[ + "associationData", + "associationType", + "contentArn", + "contentAssociationArn", + "contentAssociationId", + "contentId", + "knowledgeBaseArn", + "knowledgeBaseId" + ], + "members":{ + "associationData":{ + "shape":"ContentAssociationContents", + "documentation":"

The content association.

" + }, + "associationType":{ + "shape":"ContentAssociationType", + "documentation":"

The type of association.

" + }, + "contentArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content.

" + }, + "contentAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content association.

" + }, + "contentAssociationId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

" + }, + "contentId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Summary information about a content association.

" + }, + "ContentAssociationSummaryList":{ + "type":"list", + "member":{"shape":"ContentAssociationSummary"} + }, + "ContentAssociationType":{ + "type":"string", + "enum":["AMAZON_CONNECT_GUIDE"] + }, "ContentData":{ "type":"structure", "required":[ @@ -1405,6 +1606,55 @@ } } }, + "CreateContentAssociationRequest":{ + "type":"structure", + "required":[ + "association", + "associationType", + "contentId", + "knowledgeBaseId" + ], + "members":{ + "association":{ + "shape":"ContentAssociationContents", + "documentation":"

The identifier of the associated resource.

" + }, + "associationType":{ + "shape":"ContentAssociationType", + "documentation":"

The type of association.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs.

", + "idempotencyToken":true + }, + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateContentAssociationResponse":{ + "type":"structure", + "members":{ + "contentAssociation":{ + "shape":"ContentAssociationData", + "documentation":"

The association between Amazon Q in Connect content and another resource.

" + } + } + }, "CreateContentRequest":{ "type":"structure", "required":[ @@ -1721,6 +1971,39 @@ "members":{ } }, + "DeleteContentAssociationRequest":{ + "type":"structure", + "required":[ + "contentAssociationId", + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentAssociationId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentAssociationId" + }, + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DeleteContentAssociationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteContentRequest":{ "type":"structure", "required":[ @@ -2017,6 +2300,43 @@ } } }, + "GetContentAssociationRequest":{ + "type":"structure", + "required":[ + "contentAssociationId", + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentAssociationId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentAssociationId" + }, + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "GetContentAssociationResponse":{ + "type":"structure", + "members":{ + "contentAssociation":{ + "shape":"ContentAssociationData", + "documentation":"

The association between Amazon Q in Connect content and another resource.

" + } + } + }, "GetContentRequest":{ "type":"structure", "required":[ @@ -2311,7 +2631,7 @@ "externalSourceConfiguration":{"shape":"ExternalSourceConfiguration"}, "failedRecordReport":{ "shape":"Url", - "documentation":"

The link to donwload the information of resource data that failed to be imported.

" + "documentation":"

The link to download the information of resource data that failed to be imported.

" }, "importJobId":{ "shape":"Uuid", @@ -2652,6 +2972,53 @@ } } }, + "ListContentAssociationsRequest":{ + "type":"structure", + "required":[ + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListContentAssociationsResponse":{ + "type":"structure", + "required":["contentAssociationSummaries"], + "members":{ + "contentAssociationSummaries":{ + "shape":"ContentAssociationSummaryList", + "documentation":"

Summary information about content associations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, "ListContentsRequest":{ "type":"structure", "required":["knowledgeBaseId"], @@ -4344,6 +4711,19 @@ }, "documentation":"

Details about the source content text data.

" }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The throttling limit has been exceeded.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, "TimeToLive":{ "type":"integer", "documentation":"

Expiration time in minutes

", @@ -4610,7 +4990,7 @@ }, "UuidOrArn":{ "type":"string", - "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})?$" + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" }, "ValidationException":{ "type":"structure", diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index efdbb232c8..4e7264dca5 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -9,9 +9,45 @@ "serviceFullName":"Amazon QuickSight", "serviceId":"QuickSight", "signatureVersion":"v4", - "uid":"quicksight-2018-04-01" + "uid":"quicksight-2018-04-01", + "auth":["aws.auth#sigv4"] }, "operations":{ + "BatchCreateTopicReviewedAnswer":{ + "name":"BatchCreateTopicReviewedAnswer", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/topics/{TopicId}/batch-create-reviewed-answers" + }, + "input":{"shape":"BatchCreateTopicReviewedAnswerRequest"}, + "output":{"shape":"BatchCreateTopicReviewedAnswerResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Creates new reviewed answers for a Q Topic.

" + }, + "BatchDeleteTopicReviewedAnswer":{ + "name":"BatchDeleteTopicReviewedAnswer", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/topics/{TopicId}/batch-delete-reviewed-answers" + }, + "input":{"shape":"BatchDeleteTopicReviewedAnswerRequest"}, + "output":{"shape":"BatchDeleteTopicReviewedAnswerResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes reviewed answers for a Q Topic.

" + }, "CancelIngestion":{ "name":"CancelIngestion", "http":{ @@ -148,6 +184,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceExistsException"}, {"shape":"ThrottlingException"}, + {"shape":"CustomerManagedKeyUnavailableException"}, {"shape":"InternalFailureException"} ], "documentation":"

Creates a data source.

" @@ -1421,6 +1458,22 @@ ], "documentation":"

Provides a summary and status of IP rules.

" }, + "DescribeKeyRegistration":{ + "name":"DescribeKeyRegistration", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/key-registration" + }, + "input":{"shape":"DescribeKeyRegistrationRequest"}, + "output":{"shape":"DescribeKeyRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Describes all customer managed key registrations in an Amazon QuickSight account.

" + }, "DescribeNamespace":{ "name":"DescribeNamespace", "http":{ @@ -2274,6 +2327,23 @@ ], "documentation":"

Lists all of the refresh schedules for a topic.

" }, + "ListTopicReviewedAnswers":{ + "name":"ListTopicReviewedAnswers", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/topics/{TopicId}/reviewed-answers" + }, + "input":{"shape":"ListTopicReviewedAnswersRequest"}, + "output":{"shape":"ListTopicReviewedAnswersResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all reviewed answers for a Q Topic.

" + }, "ListTopics":{ "name":"ListTopics", "http":{ @@ -2815,6 +2885,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"CustomerManagedKeyUnavailableException"}, {"shape":"InternalFailureException"} ], "documentation":"

Updates a data source.

" @@ -2949,6 +3020,22 @@ ], "documentation":"

Updates the content and status of IP rules. Traffic from a source is allowed when the source satisfies either the IpRestrictionRule, VpcIdRestrictionRule, or VpcEndpointIdRestrictionRule. To use this operation, you must provide the entire map of rules. You can use the DescribeIpRestriction operation to get the current rule map.

" }, + "UpdateKeyRegistration":{ + "name":"UpdateKeyRegistration", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/key-registration" + }, + "input":{"shape":"UpdateKeyRegistrationRequest"}, + "output":{"shape":"UpdateKeyRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates a customer managed key in an Amazon QuickSight account.

" + }, "UpdatePublicSharingSettings":{ "name":"UpdatePublicSharingSettings", "http":{ @@ -3371,6 +3458,67 @@ "max":20, "min":1 }, + "AggFunction":{ + "type":"structure", + "members":{ + "Aggregation":{ + "shape":"AggType", + "documentation":"

The aggregation of an Agg function.

" + }, + "AggregationFunctionParameters":{ + "shape":"AggFunctionParamMap", + "documentation":"

The aggregation parameters for an Agg function.

" + }, + "Period":{ + "shape":"TopicTimeGranularity", + "documentation":"

The period of an Agg function.

" + }, + "PeriodField":{ + "shape":"LimitedString", + "documentation":"

The period field for an Agg function.

" + } + }, + "documentation":"

The definition of an Agg function.

" + }, + "AggFunctionParamKey":{ + "type":"string", + "max":256, + "min":1 + }, + "AggFunctionParamMap":{ + "type":"map", + "key":{"shape":"AggFunctionParamKey"}, + "value":{"shape":"AggFunctionParamValue"} + }, + "AggFunctionParamValue":{ + "type":"string", + "max":1024 + }, + "AggType":{ + "type":"string", + "enum":[ + "SUM", + "MIN", + "MAX", + "COUNT", + "AVERAGE", + "DISTINCT_COUNT", + "STDEV", + "STDEVP", + "VAR", + "VARP", + "PERCENTILE", + "MEDIAN", + "PTD_SUM", + "PTD_MIN", + "PTD_MAX", + "PTD_COUNT", + "PTD_DISTINCT_COUNT", + "PTD_AVERAGE", + "COLUMN", + "CUSTOM" + ] + }, "AggregationFunction":{ "type":"structure", "members":{ @@ -3398,6 +3546,25 @@ "key":{"shape":"LimitedString"}, "value":{"shape":"LimitedString"} }, + "AggregationPartitionBy":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"LimitedString", + "documentation":"

The field Name for an AggregationPartitionBy.

" + }, + "TimeGranularity":{ + "shape":"TimeGranularity", + "documentation":"

The TimeGranularity for an AggregationPartitionBy.

" + } + }, + "documentation":"

The definition of an AggregationPartitionBy.

" + }, + "AggregationPartitionByList":{ + "type":"list", + "member":{"shape":"AggregationPartitionBy"}, + "max":50 + }, "AggregationSortConfiguration":{ "type":"structure", "required":[ @@ -3694,6 +3861,24 @@ "member":{"shape":"AnalysisSummary"}, "max":100 }, + "Anchor":{ + "type":"structure", + "members":{ + "AnchorType":{ + "shape":"AnchorType", + "documentation":"

The AnchorType for the Anchor.

" + }, + "TimeGranularity":{ + "shape":"TimeGranularity", + "documentation":"

The TimeGranularity of the Anchor.

" + }, + "Offset":{ + "shape":"Integer", + "documentation":"

The offset of the Anchor.

" + } + }, + "documentation":"

The definition of the Anchor.

" + }, "AnchorDateConfiguration":{ "type":"structure", "members":{ @@ -3712,6 +3897,10 @@ "type":"string", "enum":["NOW"] }, + "AnchorType":{ + "type":"string", + "enum":["TODAY"] + }, "AnonymousUserDashboardEmbeddingConfiguration":{ "type":"structure", "required":["InitialDashboardId"], @@ -3792,6 +3981,15 @@ "type":"list", "member":{"shape":"AnonymousUserSnapshotJobResult"} }, + "AnswerId":{ + "type":"string", + "max":256, + "pattern":"^[A-Za-z0-9-_.\\\\+]*$" + }, + "AnswerIds":{ + "type":"list", + "member":{"shape":"AnswerId"} + }, "ArcAxisConfiguration":{ "type":"structure", "members":{ @@ -5592,6 +5790,117 @@ "IMAGERY" ] }, + "BatchCreateTopicReviewedAnswerRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TopicId", + "Answers" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that you want to create a reviewed answer in.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "location":"uri", + "locationName":"TopicId" + }, + "Answers":{ + "shape":"CreateTopicReviewedAnswers", + "documentation":"

The definition of the Answers to be created.

" + } + } + }, + "BatchCreateTopicReviewedAnswerResponse":{ + "type":"structure", + "members":{ + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + }, + "TopicArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the topic.

" + }, + "SucceededAnswers":{ + "shape":"SucceededTopicReviewedAnswers", + "documentation":"

The definition of Answers that are successfully created.

" + }, + "InvalidAnswers":{ + "shape":"InvalidTopicReviewedAnswers", + "documentation":"

The definition of Answers that are invalid and not created.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + } + }, + "BatchDeleteTopicReviewedAnswerRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TopicId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that you want to delete reviewed answers in.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID for the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "location":"uri", + "locationName":"TopicId" + }, + "AnswerIds":{ + "shape":"AnswerIds", + "documentation":"

The Answer IDs of the Answers to be deleted.

" + } + } + }, + "BatchDeleteTopicReviewedAnswerResponse":{ + "type":"structure", + "members":{ + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID of the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + }, + "TopicArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the topic.

" + }, + "SucceededAnswers":{ + "shape":"SucceededTopicReviewedAnswers", + "documentation":"

The definition of Answers that are successfully deleted.

" + }, + "InvalidAnswers":{ + "shape":"InvalidTopicReviewedAnswers", + "documentation":"

The definition of Answers that are invalid and not deleted.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, "BigQueryParameters":{ "type":"structure", "required":["ProjectId"], @@ -5666,6 +5975,10 @@ "PageBreakConfiguration":{ "shape":"SectionPageBreakConfiguration", "documentation":"

The configuration of a page break for a section.

" + }, + "RepeatConfiguration":{ + "shape":"BodySectionRepeatConfiguration", + "documentation":"

Describes the configurations that are required to declare a section as repeating.

" } }, "documentation":"

The configuration of a body section.

" @@ -5685,6 +5998,94 @@ }, "documentation":"

The configuration of content in a body section.

" }, + "BodySectionDynamicCategoryDimensionConfiguration":{ + "type":"structure", + "required":["Column"], + "members":{ + "Column":{"shape":"ColumnIdentifier"}, + "Limit":{ + "shape":"BodySectionDynamicDimensionLimit", + "documentation":"

Number of values to use from the column for repetition.

", + "box":true + }, + "SortByMetrics":{ + "shape":"BodySectionDynamicDimensionSortConfigurationList", + "documentation":"

Sort criteria on the column values that you use for repetition.

" + } + }, + "documentation":"

Describes the Category dataset column and constraints for the dynamic values used to repeat the contents of a section.

" + }, + "BodySectionDynamicDimensionLimit":{ + "type":"integer", + "max":1000, + "min":1 + }, + "BodySectionDynamicDimensionSortConfigurationList":{ + "type":"list", + "member":{"shape":"ColumnSort"}, + "max":100 + }, + "BodySectionDynamicNumericDimensionConfiguration":{ + "type":"structure", + "required":["Column"], + "members":{ + "Column":{"shape":"ColumnIdentifier"}, + "Limit":{ + "shape":"BodySectionDynamicDimensionLimit", + "documentation":"

Number of values to use from the column for repetition.

", + "box":true + }, + "SortByMetrics":{ + "shape":"BodySectionDynamicDimensionSortConfigurationList", + "documentation":"

Sort criteria on the column values that you use for repetition.

" + } + }, + "documentation":"

Describes the Numeric dataset column and constraints for the dynamic values used to repeat the contents of a section.

" + }, + "BodySectionRepeatConfiguration":{ + "type":"structure", + "members":{ + "DimensionConfigurations":{ + "shape":"BodySectionRepeatDimensionConfigurationList", + "documentation":"

List of BodySectionRepeatDimensionConfiguration values that describe the dataset column and constraints for the column used to repeat the contents of a section.

" + }, + "PageBreakConfiguration":{ + "shape":"BodySectionRepeatPageBreakConfiguration", + "documentation":"

Page break configuration to apply for each repeating instance.

" + }, + "NonRepeatingVisuals":{ + "shape":"NonRepeatingVisualsList", + "documentation":"

List of visuals to exclude from repetition in repeating sections. The visuals will render identically, and ignore the repeating configurations in all repeating instances.

" + } + }, + "documentation":"

Describes the configurations that are required to declare a section as repeating.

" + }, + "BodySectionRepeatDimensionConfiguration":{ + "type":"structure", + "members":{ + "DynamicCategoryDimensionConfiguration":{ + "shape":"BodySectionDynamicCategoryDimensionConfiguration", + "documentation":"

Describes the Category dataset column and constraints around the dynamic values that will be used in repeating the section contents.

" + }, + "DynamicNumericDimensionConfiguration":{ + "shape":"BodySectionDynamicNumericDimensionConfiguration", + "documentation":"

Describes the Numeric dataset column and constraints around the dynamic values used to repeat the contents of a section.

" + } + }, + "documentation":"

Describes the dataset column and constraints for the dynamic values used to repeat the contents of a section. The dataset column is either a Category or Numeric column configuration.

" + }, + "BodySectionRepeatDimensionConfigurationList":{ + "type":"list", + "member":{"shape":"BodySectionRepeatDimensionConfiguration"}, + "max":3 + }, + "BodySectionRepeatPageBreakConfiguration":{ + "type":"structure", + "members":{ + "After":{"shape":"SectionAfterPageBreak"} + }, + "documentation":"

The page break configuration to apply for each repeating instance.

" + }, "BookmarksConfigurations":{ "type":"structure", "required":["Enabled"], @@ -5936,6 +6337,11 @@ "min":1, "sensitive":true }, + "CalculatedFieldReferenceList":{ + "type":"list", + "member":{"shape":"Identifier"}, + "max":250 + }, "CalculatedFields":{ "type":"list", "member":{"shape":"CalculatedField"}, @@ -6220,9 +6626,22 @@ "FILTER_LIST" ] }, - "CategoryValue":{ - "type":"string", - "max":512 + "CategoryInnerFilter":{ + "type":"structure", + "required":[ + "Column", + "Configuration" + ], + "members":{ + "Column":{"shape":"ColumnIdentifier"}, + "Configuration":{"shape":"CategoryFilterConfiguration"}, + "DefaultFilterControlConfiguration":{"shape":"DefaultFilterControlConfiguration"} + }, + "documentation":"

A CategoryInnerFilter filters text values for the NestedFilter.

" + }, + "CategoryValue":{ + "type":"string", + "max":512 }, "CategoryValueList":{ "type":"list", @@ -6300,6 +6719,25 @@ }, "documentation":"

A structure that represents a collective constant.

" }, + "CollectiveConstantEntry":{ + "type":"structure", + "members":{ + "ConstantType":{ + "shape":"ConstantType", + "documentation":"

The ConstantType of a CollectiveConstantEntry.

" + }, + "Value":{ + "shape":"ConstantValueString", + "documentation":"

The value of a CollectiveConstantEntry.

" + } + }, + "documentation":"

The definition for a CollectiveConstantEntry.

" + }, + "CollectiveConstantEntryList":{ + "type":"list", + "member":{"shape":"CollectiveConstantEntry"}, + "max":2000 + }, "ColorFillType":{ "type":"string", "enum":[ @@ -6881,6 +7319,21 @@ "PERCENT" ] }, + "ComparisonMethodType":{ + "type":"string", + "enum":[ + "DIFF", + "PERC_DIFF", + "DIFF_AS_PERC", + "POP_CURRENT_DIFF_AS_PERC", + "POP_CURRENT_DIFF", + "POP_OVERTIME_DIFF_AS_PERC", + "POP_OVERTIME_DIFF", + "PERCENT_OF_TOTAL", + "RUNNING_SUM", + "MOVING_AVERAGE" + ] + }, "Computation":{ "type":"structure", "members":{ @@ -7109,6 +7562,10 @@ "COLLECTIVE" ] }, + "ConstantValueString":{ + "type":"string", + "max":1024 + }, "ContextMenuOption":{ "type":"structure", "members":{ @@ -7143,6 +7600,52 @@ "max":200, "min":1 }, + "ContributionAnalysisDirection":{ + "type":"string", + "enum":[ + "INCREASE", + "DECREASE", + "NEUTRAL" + ] + }, + "ContributionAnalysisFactor":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"LimitedString", + "documentation":"

The field name of the ContributionAnalysisFactor.

" + } + }, + "documentation":"

The definition for the ContributionAnalysisFactor.

" + }, + "ContributionAnalysisFactorsList":{ + "type":"list", + "member":{"shape":"ContributionAnalysisFactor"}, + "max":50 + }, + "ContributionAnalysisSortType":{ + "type":"string", + "enum":[ + "ABSOLUTE_DIFFERENCE", + "CONTRIBUTION_PERCENTAGE", + "DEVIATION_FROM_EXPECTED", + "PERCENTAGE_DIFFERENCE" + ] + }, + "ContributionAnalysisTimeRanges":{ + "type":"structure", + "members":{ + "StartRange":{ + "shape":"TopicIRFilterOption", + "documentation":"

The start range for the ContributionAnalysisTimeRanges.

" + }, + "EndRange":{ + "shape":"TopicIRFilterOption", + "documentation":"

The end range for the ContributionAnalysisTimeRanges.

" + } + }, + "documentation":"

The definition for the ContributionAnalysisTimeRanges.

" + }, "ContributorDimensionList":{ "type":"list", "member":{"shape":"ColumnIdentifier"}, @@ -8592,6 +9095,47 @@ } } }, + "CreateTopicReviewedAnswer":{ + "type":"structure", + "required":[ + "AnswerId", + "DatasetArn", + "Question" + ], + "members":{ + "AnswerId":{ + "shape":"AnswerId", + "documentation":"

The answer ID for the CreateTopicReviewedAnswer.

" + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Dataset arn for the CreateTopicReviewedAnswer.

" + }, + "Question":{ + "shape":"LimitedString", + "documentation":"

The Question to be created.

" + }, + "Mir":{ + "shape":"TopicIR", + "documentation":"

The Mir for the CreateTopicReviewedAnswer.

" + }, + "PrimaryVisual":{ + "shape":"TopicVisual", + "documentation":"

The PrimaryVisual for the CreateTopicReviewedAnswer.

" + }, + "Template":{ + "shape":"TopicTemplate", + "documentation":"

The template for the CreateTopicReviewedAnswer.

" + } + }, + "documentation":"

The definition for a CreateTopicReviewedAnswer.

" + }, + "CreateTopicReviewedAnswers":{ + "type":"list", + "member":{"shape":"CreateTopicReviewedAnswer"}, + "max":100, + "min":0 + }, "CreateVPCConnectionRequest":{ "type":"structure", "required":[ @@ -9036,6 +9580,19 @@ }, "documentation":"

The configuration of custom values for the destination parameter in DestinationParameterValueConfiguration.

" }, + "CustomerManagedKeyUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + }, + "documentation":"

The customer managed key that is registered to your Amazon QuickSight account is unavailable.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "Dashboard":{ "type":"structure", "members":{ @@ -10763,6 +11320,14 @@ "InfoIconLabelOptions":{ "shape":"SheetControlInfoIconLabelOptions", "documentation":"

The configuration of info icon label options.

" + }, + "HelperTextVisibility":{ + "shape":"Visibility", + "documentation":"

The helper text visibility of the DateTimePickerControlDisplayOptions.

" + }, + "DateIconVisibility":{ + "shape":"Visibility", + "documentation":"

The date icon visibility of the DateTimePickerControlDisplayOptions.

" } }, "documentation":"

The display options of a control.

" @@ -13793,6 +14358,45 @@ } } }, + "DescribeKeyRegistrationRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the customer managed key registration that you want to describe.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DefaultKeyOnly":{ + "shape":"Boolean", + "documentation":"

Determines whether the request returns the default key only.

", + "location":"querystring", + "locationName":"default-key-only" + } + } + }, + "DescribeKeyRegistrationResponse":{ + "type":"structure", + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the customer managed key registration specified in the request.

" + }, + "KeyRegistration":{ + "shape":"KeyRegistration", + "documentation":"

A list of RegisteredCustomerManagedKey objects in an Amazon QuickSight account.

" + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

" + } + } + }, "DescribeNamespaceRequest":{ "type":"structure", "required":[ @@ -14987,6 +15591,37 @@ "min":1, "sensitive":true }, + "FailedKeyRegistrationEntries":{ + "type":"list", + "member":{"shape":"FailedKeyRegistrationEntry"} + }, + "FailedKeyRegistrationEntry":{ + "type":"structure", + "required":[ + "Message", + "StatusCode", + "SenderFault" + ], + "members":{ + "KeyArn":{ + "shape":"String", + "documentation":"

The ARN of the KMS key that failed to update.

" + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

A message that provides information about why a FailedKeyRegistrationEntry error occurred.

" + }, + "StatusCode":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of a FailedKeyRegistrationEntry error.

" + }, + "SenderFault":{ + "shape":"Boolean", + "documentation":"

A boolean that indicates whether a FailedKeyRegistrationEntry resulted from user error. If the value of this property is True, the error was caused by user error. If the value of this property is False, the error occurred on the backend. If your job continues to fail with a False SenderFault value, contact Amazon Web Services Support.

" + } + }, + "documentation":"

An entry that appears when a KeyRegistration update to Amazon QuickSight fails.

" + }, "FieldBasedTooltip":{ "type":"structure", "members":{ @@ -15340,10 +15975,37 @@ "TopBottomFilter":{ "shape":"TopBottomFilter", "documentation":"

A TopBottomFilter filters data to the top or bottom values for a given column.

" + }, + "NestedFilter":{ + "shape":"NestedFilter", + "documentation":"

A NestedFilter filters data with a subset of data that is defined by the nested inner filter.

" } }, "documentation":"

With a Filter, you can remove portions of data from a particular visual or view.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" }, + "FilterAggMetrics":{ + "type":"structure", + "members":{ + "MetricOperand":{ + "shape":"Identifier", + "documentation":"

The metric operand of the FilterAggMetrics.

" + }, + "Function":{ + "shape":"AggType", + "documentation":"

The function for the FilterAggMetrics.

" + }, + "SortDirection":{ + "shape":"TopicSortDirection", + "documentation":"

The sort direction for FilterAggMetrics.

" + } + }, + "documentation":"

The definition for the FilterAggMetrics.

" + }, + "FilterAggMetricsList":{ + "type":"list", + "member":{"shape":"FilterAggMetrics"}, + "max":100 + }, "FilterClass":{ "type":"string", "enum":[ @@ -17837,6 +18499,17 @@ "X" ] }, + "Identifier":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{ + "shape":"LimitedString", + "documentation":"

The identity of the identifier.

" + } + }, + "documentation":"

The definition for the identifier.

" + }, "IdentityCenterConfiguration":{ "type":"structure", "members":{ @@ -18050,6 +18723,16 @@ "type":"list", "member":{"shape":"Ingestion"} }, + "InnerFilter":{ + "type":"structure", + "members":{ + "CategoryInnerFilter":{ + "shape":"CategoryInnerFilter", + "documentation":"

A CategoryInnerFilter filters text values for the NestedFilter.

" + } + }, + "documentation":"

The InnerFilter defines the subset of data to be used with the NestedFilter.

" + }, "InputColumn":{ "type":"structure", "required":[ @@ -18332,6 +19015,24 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidTopicReviewedAnswer":{ + "type":"structure", + "members":{ + "AnswerId":{ + "shape":"AnswerId", + "documentation":"

The answer ID for the InvalidTopicReviewedAnswer.

" + }, + "Error":{ + "shape":"ReviewedAnswerErrorCode", + "documentation":"

The error that is returned for the InvalidTopicReviewedAnswer.

" + } + }, + "documentation":"

The definition for a InvalidTopicReviewedAnswer.

" + }, + "InvalidTopicReviewedAnswers":{ + "type":"list", + "member":{"shape":"InvalidTopicReviewedAnswer"} + }, "IpRestrictionRuleDescription":{ "type":"string", "max":150, @@ -18698,6 +19399,10 @@ "VERTICAL" ] }, + "KeyRegistration":{ + "type":"list", + "member":{"shape":"RegisteredCustomerManagedKey"} + }, "LabelOptions":{ "type":"structure", "members":{ @@ -20531,6 +21236,53 @@ } } }, + "ListTopicReviewedAnswersRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TopicId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the reviewed answers that you want listed.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "location":"uri", + "locationName":"TopicId" + } + } + }, + "ListTopicReviewedAnswersResponse":{ + "type":"structure", + "members":{ + "TopicId":{ + "shape":"TopicId", + "documentation":"

The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + }, + "TopicArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the topic.

" + }, + "Answers":{ + "shape":"TopicReviewedAnswers", + "documentation":"

The definition of all Answers in the topic.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + } + }, "ListTopicsRequest":{ "type":"structure", "required":["AwsAccountId"], @@ -21217,6 +21969,16 @@ "type":"list", "member":{"shape":"NamedEntityDefinition"} }, + "NamedEntityRef":{ + "type":"structure", + "members":{ + "NamedEntityName":{ + "shape":"LimitedString", + "documentation":"

The NamedEntityName for the NamedEntityRef.

" + } + }, + "documentation":"

The definition for a NamedEntityRef.

" + }, "NamedFilterAggType":{ "type":"string", "enum":[ @@ -21350,6 +22112,34 @@ "NEGATIVE" ] }, + "NestedFilter":{ + "type":"structure", + "required":[ + "FilterId", + "Column", + "IncludeInnerSet", + "InnerFilter" + ], + "members":{ + "FilterId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

An identifier that uniquely identifies a filter within a dashboard, analysis, or template.

" + }, + "Column":{ + "shape":"ColumnIdentifier", + "documentation":"

The column that the filter is applied to.

" + }, + "IncludeInnerSet":{ + "shape":"Boolean", + "documentation":"

A boolean condition to include or exclude the subset that is defined by the values of the nested inner filter.

" + }, + "InnerFilter":{ + "shape":"InnerFilter", + "documentation":"

The InnerFilter defines the subset of data to be used with the NestedFilter.

" + } + }, + "documentation":"

A NestedFilter filters data with a subset of data that is defined by the nested inner filter.

" + }, "NetworkInterface":{ "type":"structure", "members":{ @@ -21426,6 +22216,19 @@ "type":"string", "pattern":".*\\S.*" }, + "NonRepeatingVisualsList":{ + "type":"list", + "member":{"shape":"ShortRestrictiveResourceId"}, + "max":20 + }, + "NullFilterOption":{ + "type":"string", + "enum":[ + "ALL_VALUES", + "NON_NULLS_ONLY", + "NULLS_ONLY" + ] + }, "NullString":{ "type":"string", "max":128, @@ -21772,6 +22575,11 @@ "max":512, "min":1 }, + "OperandList":{ + "type":"list", + "member":{"shape":"Identifier"}, + "max":25 + }, "OptionalPort":{ "type":"integer", "max":65535, @@ -23630,10 +24438,7 @@ }, "RedshiftIAMParameters":{ "type":"structure", - "required":[ - "RoleArn", - "DatabaseUser" - ], + "required":["RoleArn"], "members":{ "RoleArn":{ "shape":"RoleArn", @@ -23977,7 +24782,7 @@ }, "UserRole":{ "shape":"UserRole", - "documentation":"

The Amazon QuickSight role for the user. The user role can be one of the following:

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

  • RESTRICTED_READER: This role isn't currently available for use.

  • RESTRICTED_AUTHOR: This role isn't currently available for use.

" + "documentation":"

The Amazon QuickSight role for the user. The user role can be one of the following:

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

  • READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

  • AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

  • ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

  • RESTRICTED_READER: This role isn't currently available for use.

  • RESTRICTED_AUTHOR: This role isn't currently available for use.

" }, "IamArn":{ "shape":"String", @@ -24047,6 +24852,20 @@ } } }, + "RegisteredCustomerManagedKey":{ + "type":"structure", + "members":{ + "KeyArn":{ + "shape":"String", + "documentation":"

The ARN of the KMS key that is registered to an Amazon QuickSight account for encryption and decryption use.

" + }, + "DefaultKey":{ + "shape":"Boolean", + "documentation":"

Indicates whether a RegisteredCustomerManagedKey is set as the default key for encryption and decryption use.

" + } + }, + "documentation":"

A customer managed key structure that contains the information listed below:

  • KeyArn - The ARN of a KMS key that is registered to an Amazon QuickSight account for encryption and decryption use.

  • DefaultKey - Indicates whether the current key is set as the default key for encryption and decryption use.

" + }, "RegisteredUserConsoleFeatureConfigurations":{ "type":"structure", "members":{ @@ -24465,10 +25284,22 @@ "min":1, "pattern":"[\\w\\-]+" }, - "Role":{ + "ReviewedAnswerErrorCode":{ "type":"string", "enum":[ - "ADMIN", + "INTERNAL_ERROR", + "MISSING_ANSWER", + "DATASET_DOES_NOT_EXIST", + "INVALID_DATASET_ARN", + "DUPLICATED_ANSWER", + "INVALID_DATA", + "MISSING_REQUIRED_FIELDS" + ] + }, + "Role":{ + "type":"string", + "enum":[ + "ADMIN", "AUTHOR", "READER", "ADMIN_PRO", @@ -26163,6 +26994,24 @@ }, "documentation":"

The display options of a control.

" }, + "Slot":{ + "type":"structure", + "members":{ + "SlotId":{ + "shape":"LimitedString", + "documentation":"

The slot ID of the slot.

" + }, + "VisualId":{ + "shape":"LimitedString", + "documentation":"

The visual ID for the slot.

" + } + }, + "documentation":"

The definition for the slot.

" + }, + "Slots":{ + "type":"list", + "member":{"shape":"Slot"} + }, "SmallMultiplesAxisPlacement":{ "type":"string", "enum":[ @@ -27075,6 +27924,42 @@ }, "documentation":"

The subtotal options.

" }, + "SucceededTopicReviewedAnswer":{ + "type":"structure", + "members":{ + "AnswerId":{ + "shape":"AnswerId", + "documentation":"

The answer ID for the SucceededTopicReviewedAnswer.

" + } + }, + "documentation":"

The definition for a SucceededTopicReviewedAnswer.

" + }, + "SucceededTopicReviewedAnswers":{ + "type":"list", + "member":{"shape":"SucceededTopicReviewedAnswer"} + }, + "SuccessfulKeyRegistrationEntries":{ + "type":"list", + "member":{"shape":"SuccessfulKeyRegistrationEntry"} + }, + "SuccessfulKeyRegistrationEntry":{ + "type":"structure", + "required":[ + "KeyArn", + "StatusCode" + ], + "members":{ + "KeyArn":{ + "shape":"String", + "documentation":"

The ARN of the KMS key that is associated with the SuccessfulKeyRegistrationEntry entry.

" + }, + "StatusCode":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of a SuccessfulKeyRegistrationEntry entry.

" + } + }, + "documentation":"

A success entry that occurs when a KeyRegistration job is successfully applied to the Amazon QuickSight account.

" + }, "Suffix":{ "type":"string", "max":128, @@ -28955,6 +29840,32 @@ "type":"list", "member":{"shape":"TopicColumn"} }, + "TopicConstantValue":{ + "type":"structure", + "members":{ + "ConstantType":{ + "shape":"ConstantType", + "documentation":"

The constant type of a TopicConstantValue.

" + }, + "Value":{ + "shape":"ConstantValueString", + "documentation":"

The value of the TopicConstantValue.

" + }, + "Minimum":{ + "shape":"ConstantValueString", + "documentation":"

The minimum for the TopicConstantValue.

" + }, + "Maximum":{ + "shape":"ConstantValueString", + "documentation":"

The maximum for the TopicConstantValue.

" + }, + "ValueList":{ + "shape":"CollectiveConstantEntryList", + "documentation":"

The value list of the TopicConstantValue.

" + } + }, + "documentation":"

The definition for a TopicConstantValue.

" + }, "TopicDateRangeFilter":{ "type":"structure", "members":{ @@ -29049,6 +29960,269 @@ "type":"list", "member":{"shape":"TopicFilter"} }, + "TopicIR":{ + "type":"structure", + "members":{ + "Metrics":{ + "shape":"TopicIRMetricList", + "documentation":"

The metrics for the TopicIR.

" + }, + "GroupByList":{ + "shape":"TopicIRGroupByList", + "documentation":"

The GroupBy list for the TopicIR.

" + }, + "Filters":{ + "shape":"TopicIRFilterList", + "documentation":"

The filters for the TopicIR.

" + }, + "Sort":{ + "shape":"TopicSortClause", + "documentation":"

The sort for the TopicIR.

" + }, + "ContributionAnalysis":{ + "shape":"TopicIRContributionAnalysis", + "documentation":"

The contribution analysis for the TopicIR.

" + }, + "Visual":{ + "shape":"VisualOptions", + "documentation":"

The visual for the TopicIR.

" + } + }, + "documentation":"

The definition for a TopicIR.

" + }, + "TopicIRComparisonMethod":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ComparisonMethodType", + "documentation":"

The type for the TopicIRComparisonMethod.

" + }, + "Period":{ + "shape":"TopicTimeGranularity", + "documentation":"

The period for the TopicIRComparisonMethod.

" + }, + "WindowSize":{ + "shape":"Integer", + "documentation":"

The window size for the TopicIRComparisonMethod.

" + } + }, + "documentation":"

The definition of a TopicIRComparisonMethod.

" + }, + "TopicIRContributionAnalysis":{ + "type":"structure", + "members":{ + "Factors":{ + "shape":"ContributionAnalysisFactorsList", + "documentation":"

The factors for a TopicIRContributionAnalysis.

" + }, + "TimeRanges":{ + "shape":"ContributionAnalysisTimeRanges", + "documentation":"

The time ranges for the TopicIRContributionAnalysis.

" + }, + "Direction":{ + "shape":"ContributionAnalysisDirection", + "documentation":"

The direction for the TopicIRContributionAnalysis.

" + }, + "SortType":{ + "shape":"ContributionAnalysisSortType", + "documentation":"

The sort type for the TopicIRContributionAnalysis.

" + } + }, + "documentation":"

The definition for a TopicIRContributionAnalysis.

" + }, + "TopicIRFilterEntry":{ + "type":"list", + "member":{"shape":"TopicIRFilterOption"}, + "max":2000 + }, + "TopicIRFilterFunction":{ + "type":"string", + "enum":[ + "CONTAINS", + "EXACT", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS_STRING", + "PREVIOUS", + "THIS", + "LAST", + "NEXT", + "NOW" + ] + }, + "TopicIRFilterList":{ + "type":"list", + "member":{"shape":"TopicIRFilterEntry"}, + "max":2000 + }, + "TopicIRFilterOption":{ + "type":"structure", + "members":{ + "FilterType":{ + "shape":"TopicIRFilterType", + "documentation":"

The filter type for the TopicIRFilterOption.

" + }, + "FilterClass":{ + "shape":"FilterClass", + "documentation":"

The filter class for the TopicIRFilterOption.

" + }, + "OperandField":{ + "shape":"Identifier", + "documentation":"

The operand field for the TopicIRFilterOption.

" + }, + "Function":{ + "shape":"TopicIRFilterFunction", + "documentation":"

The function for the TopicIRFilterOption.

" + }, + "Constant":{ + "shape":"TopicConstantValue", + "documentation":"

The constant for the TopicIRFilterOption.

" + }, + "Inverse":{ + "shape":"Boolean", + "documentation":"

The inverse for the TopicIRFilterOption.

" + }, + "NullFilter":{ + "shape":"NullFilterOption", + "documentation":"

The null filter for the TopicIRFilterOption.

" + }, + "Aggregation":{ + "shape":"AggType", + "documentation":"

The aggregation for the TopicIRFilterOption.

" + }, + "AggregationFunctionParameters":{ + "shape":"AggFunctionParamMap", + "documentation":"

The aggregation function parameters for the TopicIRFilterOption.

" + }, + "AggregationPartitionBy":{ + "shape":"AggregationPartitionByList", + "documentation":"

The AggregationPartitionBy for the TopicIRFilterOption.

" + }, + "Range":{ + "shape":"TopicConstantValue", + "documentation":"

The range for the TopicIRFilterOption.

" + }, + "Inclusive":{ + "shape":"Boolean", + "documentation":"

The inclusive for the TopicIRFilterOption.

" + }, + "TimeGranularity":{ + "shape":"TimeGranularity", + "documentation":"

The time granularity for the TopicIRFilterOption.

" + }, + "LastNextOffset":{ + "shape":"TopicConstantValue", + "documentation":"

The last next offset for the TopicIRFilterOption.

" + }, + "AggMetrics":{ + "shape":"FilterAggMetricsList", + "documentation":"

The agg metrics for the TopicIRFilterOption.

" + }, + "TopBottomLimit":{ + "shape":"TopicConstantValue", + "documentation":"

The TopBottomLimit for the TopicIRFilterOption.

" + }, + "SortDirection":{ + "shape":"TopicSortDirection", + "documentation":"

The sort direction for the TopicIRFilterOption.

" + }, + "Anchor":{ + "shape":"Anchor", + "documentation":"

The anchor for the TopicIRFilterOption.

" + } + }, + "documentation":"

The definition for a TopicIRFilterOption.

" + }, + "TopicIRFilterType":{ + "type":"string", + "enum":[ + "CATEGORY_FILTER", + "NUMERIC_EQUALITY_FILTER", + "NUMERIC_RANGE_FILTER", + "DATE_RANGE_FILTER", + "RELATIVE_DATE_FILTER", + "TOP_BOTTOM_FILTER", + "EQUALS", + "RANK_LIMIT_FILTER", + "ACCEPT_ALL_FILTER" + ] + }, + "TopicIRGroupBy":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"Identifier", + "documentation":"

The field name for the TopicIRGroupBy.

" + }, + "TimeGranularity":{ + "shape":"TopicTimeGranularity", + "documentation":"

The time granularity for the TopicIRGroupBy.

" + }, + "Sort":{ + "shape":"TopicSortClause", + "documentation":"

The sort for the TopicIRGroupBy.

" + }, + "DisplayFormat":{ + "shape":"DisplayFormat", + "documentation":"

The display format for the TopicIRGroupBy.

" + }, + "DisplayFormatOptions":{"shape":"DisplayFormatOptions"}, + "NamedEntity":{ + "shape":"NamedEntityRef", + "documentation":"

The named entity for the TopicIRGroupBy.

" + } + }, + "documentation":"

The definition for a TopicIRGroupBy.

" + }, + "TopicIRGroupByList":{ + "type":"list", + "member":{"shape":"TopicIRGroupBy"}, + "max":2000 + }, + "TopicIRMetric":{ + "type":"structure", + "members":{ + "MetricId":{ + "shape":"Identifier", + "documentation":"

The metric ID for the TopicIRMetric.

" + }, + "Function":{ + "shape":"AggFunction", + "documentation":"

The function for the TopicIRMetric.

" + }, + "Operands":{ + "shape":"OperandList", + "documentation":"

The operands for the TopicIRMetric.

" + }, + "ComparisonMethod":{ + "shape":"TopicIRComparisonMethod", + "documentation":"

The comparison method for the TopicIRMetric.

" + }, + "Expression":{ + "shape":"Expression", + "documentation":"

The expression for the TopicIRMetric.

" + }, + "CalculatedFieldReferences":{ + "shape":"CalculatedFieldReferenceList", + "documentation":"

The calculated field references for the TopicIRMetric.

" + }, + "DisplayFormat":{ + "shape":"DisplayFormat", + "documentation":"

The display format for the TopicIRMetric.

" + }, + "DisplayFormatOptions":{"shape":"DisplayFormatOptions"}, + "NamedEntity":{ + "shape":"NamedEntityRef", + "documentation":"

The named entity for the TopicIRMetric.

" + } + }, + "documentation":"

The definition for a TopicIRMetric.

" + }, + "TopicIRMetricList":{ + "type":"list", + "member":{"shape":"TopicIRMetric"}, + "max":2000 + }, "TopicId":{ "type":"string", "max":256, @@ -29255,6 +30429,49 @@ "NOW" ] }, + "TopicReviewedAnswer":{ + "type":"structure", + "required":[ + "AnswerId", + "DatasetArn", + "Question" + ], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the reviewed answer.

" + }, + "AnswerId":{ + "shape":"AnswerId", + "documentation":"

The answer ID of the reviewed answer.

" + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Dataset ARN for the TopicReviewedAnswer.

" + }, + "Question":{ + "shape":"LimitedString", + "documentation":"

The question for the TopicReviewedAnswer.

" + }, + "Mir":{ + "shape":"TopicIR", + "documentation":"

The mir for the TopicReviewedAnswer.

" + }, + "PrimaryVisual":{ + "shape":"TopicVisual", + "documentation":"

The primary visual for the TopicReviewedAnswer.

" + }, + "Template":{ + "shape":"TopicTemplate", + "documentation":"

The template for the TopicReviewedAnswer.

" + } + }, + "documentation":"

The definition for a TopicReviewedAnswer.

" + }, + "TopicReviewedAnswers":{ + "type":"list", + "member":{"shape":"TopicReviewedAnswer"} + }, "TopicScheduleType":{ "type":"string", "enum":[ @@ -29279,6 +30496,27 @@ "documentation":"

A structure that represents a singular filter constant, used in filters to specify a single value to match against.

", "sensitive":true }, + "TopicSortClause":{ + "type":"structure", + "members":{ + "Operand":{ + "shape":"Identifier", + "documentation":"

The operand for a TopicSortClause.

" + }, + "SortDirection":{ + "shape":"TopicSortDirection", + "documentation":"

The sort direction for the TopicSortClause.

" + } + }, + "documentation":"

The definition for a TopicSortClause.

" + }, + "TopicSortDirection":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "TopicSummaries":{ "type":"list", "member":{"shape":"TopicSummary"} @@ -29305,6 +30543,20 @@ }, "documentation":"

A topic summary.

" }, + "TopicTemplate":{ + "type":"structure", + "members":{ + "TemplateType":{ + "shape":"LimitedString", + "documentation":"

The template type for the TopicTemplate.

" + }, + "Slots":{ + "shape":"Slots", + "documentation":"

The slots for the TopicTemplate.

" + } + }, + "documentation":"

The definition for a TopicTemplate.

" + }, "TopicTimeGranularity":{ "type":"string", "enum":[ @@ -29325,6 +30577,32 @@ "NEW_READER_EXPERIENCE" ] }, + "TopicVisual":{ + "type":"structure", + "members":{ + "VisualId":{ + "shape":"LimitedString", + "documentation":"

The visual ID for the TopicVisual.

" + }, + "Role":{ + "shape":"VisualRole", + "documentation":"

The role for the TopicVisual.

" + }, + "Ir":{ + "shape":"TopicIR", + "documentation":"

The ir for the TopicVisual.

" + }, + "SupportingVisuals":{ + "shape":"TopicVisuals", + "documentation":"

The supporting visuals for the TopicVisual.

" + } + }, + "documentation":"

The definition for a TopicVisual.

" + }, + "TopicVisuals":{ + "type":"list", + "member":{"shape":"TopicVisual"} + }, "TotalAggregationComputation":{ "type":"structure", "required":["ComputationId"], @@ -30914,6 +32192,42 @@ } } }, + "UpdateKeyRegistrationRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "KeyRegistration" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the customer managed key registration that you want to update.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "KeyRegistration":{ + "shape":"KeyRegistration", + "documentation":"

A list of RegisteredCustomerManagedKey objects to be updated to the Amazon QuickSight account.

" + } + } + }, + "UpdateKeyRegistrationResponse":{ + "type":"structure", + "members":{ + "FailedKeyRegistration":{ + "shape":"FailedKeyRegistrationEntries", + "documentation":"

A list of all customer managed key registrations that failed to update.

" + }, + "SuccessfulKeyRegistration":{ + "shape":"SuccessfulKeyRegistrationEntries", + "documentation":"

A list of all customer managed key registrations that were successfully updated.

" + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + } + }, "UpdateLinkPermissionList":{ "type":"list", "member":{"shape":"ResourcePermission"}, @@ -31632,7 +32946,7 @@ }, "Role":{ "shape":"UserRole", - "documentation":"

The Amazon QuickSight role of the user. The role can be one of the following default security cohorts:

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

  • READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards.

  • AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

  • ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

The name of the Amazon QuickSight role is invisible to the user except for the console screens dealing with permissions.

" + "documentation":"

The Amazon QuickSight role of the user. The role can be one of the following default security cohorts:

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

  • READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

  • AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

  • ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

The name of the Amazon QuickSight role is invisible to the user except for the console screens dealing with permissions.

" }, "CustomPermissionsName":{ "shape":"RoleName", @@ -31794,7 +33108,7 @@ }, "Role":{ "shape":"UserRole", - "documentation":"

The Amazon QuickSight role for the user. The user role can be one of the following:.

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon Amazon QuickSight settings.

  • READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards.

  • AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

  • ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

  • RESTRICTED_READER: This role isn't currently available for use.

  • RESTRICTED_AUTHOR: This role isn't currently available for use.

" + "documentation":"

The Amazon QuickSight role for the user. The user role can be one of the following:.

  • READER: A user who has read-only access to dashboards.

  • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

  • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

  • READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

  • AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

  • ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

  • RESTRICTED_READER: This role isn't currently available for use.

  • RESTRICTED_AUTHOR: This role isn't currently available for use.

" }, "IdentityType":{ "shape":"IdentityType", @@ -32278,6 +33592,16 @@ }, "documentation":"

The menu options for a visual.

" }, + "VisualOptions":{ + "type":"structure", + "members":{ + "type":{ + "shape":"LimitedString", + "documentation":"

The type for a VisualOptions.

" + } + }, + "documentation":"

The definition for a VisualOptions.

" + }, "VisualPalette":{ "type":"structure", "members":{ @@ -32292,6 +33616,16 @@ }, "documentation":"

The visual display options for the visual palette.

" }, + "VisualRole":{ + "type":"string", + "enum":[ + "PRIMARY", + "COMPLIMENTARY", + "MULTI_INTENT", + "FALLBACK", + "FRAGMENT" + ] + }, "VisualSubtitleLabelOptions":{ "type":"structure", "members":{ diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 84c323b732..b9ec58a6d4 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -10,7 +10,8 @@ "serviceId":"RDS", "signatureVersion":"v4", "uid":"rds-2014-10-31", - "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddRoleToDBCluster":{ @@ -78,7 +79,7 @@ {"shape":"TenantDatabaseNotFoundFault"}, {"shape":"DBSnapshotTenantDatabaseNotFoundFault"} ], - "documentation":"

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

" + "documentation":"

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging your relational database resources, see Tagging Amazon RDS Resources or Tagging Amazon Aurora and Amazon RDS Resources.

" }, "ApplyPendingMaintenanceAction":{ "name":"ApplyPendingMaintenanceAction", @@ -1683,7 +1684,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

" + "documentation":"

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

This API follows an eventual consistency model. This means that the result of the DescribePendingMaintenanceActions command might not be immediately visible to all subsequent RDS commands. Keep this in mind when you use DescribePendingMaintenanceActions immediately after using a previous API command such as ApplyPendingMaintenanceActions.

" }, "DescribeReservedDBInstances":{ "name":"DescribeReservedDBInstances", @@ -2904,7 +2905,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"InvalidExportSourceStateFault"} ], - "documentation":"

Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket.

You can't export snapshot data from Db2 or RDS Custom DB instances.

You can't export cluster data from Multi-AZ DB clusters.

For more information on exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User Guide.

For more information on exporting DB cluster data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide.

" + "documentation":"

Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket.

You can't export snapshot data from Db2 or RDS Custom DB instances.

For more information on exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User Guide.

For more information on exporting DB cluster data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide.

" }, "StopActivityStream":{ "name":"StopActivityStream", @@ -3598,7 +3599,7 @@ "documentation":"

The expiration date of the DB instance’s server certificate.

" } }, - "documentation":"

Returns the details of the DB instance’s server certificate.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" + "documentation":"

The details of the DB instance’s server certificate.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" }, "CertificateList":{ "type":"list", @@ -4115,7 +4116,7 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

A list of Availability Zones (AZs) where DB instances in the DB cluster can be created.

For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide.

Valid for Cluster Type: Aurora DB clusters only

" + "documentation":"

A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster.

For information on AZs, see Availability Zones in the Amazon Aurora User Guide.

Valid for Cluster Type: Aurora DB clusters only

Constraints:

  • Can't specify more than three AZs.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -4127,7 +4128,7 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" + "documentation":"

The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" }, "DBClusterIdentifier":{ "shape":"String", @@ -4143,11 +4144,11 @@ }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

A DB subnet group to associate with this DB cluster.

This setting is required to create a Multi-AZ DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

  • Must match the name of an existing DB subnet group.

  • Must not be default.

Example: mydbsubnetgroup

" + "documentation":"

A DB subnet group to associate with this DB cluster.

This setting is required to create a Multi-AZ DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

  • Must match the name of an existing DB subnet group.

Example: mydbsubnetgroup

" }, "Engine":{ "shape":"String", - "documentation":"

The database engine to use for this DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres

" + "documentation":"

The database engine to use for this DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values:

  • aurora-mysql

  • aurora-postgresql

  • mysql

  • postgres

  • neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide .

" }, "EngineVersion":{ "shape":"String", @@ -4267,7 +4268,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Valid for Cluster Type: Multi-AZ DB clusters only

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

  • If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

" + "documentation":"

Specifies whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Valid for Cluster Type: Multi-AZ DB clusters only

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

  • If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -4321,6 +4322,10 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

The CA certificate identifier to use for the DB cluster's server certificate.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide.

Valid for Cluster Type: Multi-AZ DB clusters

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -4402,7 +4407,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"

The meaning of this parameter differs according to the database engine you use.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

  • Must contain 1 to 64 alphanumeric characters.

  • Can't be a word reserved by the database engine.

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.

Constraints:

  • It must contain 1 to 63 alphanumeric characters.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

Default: ORCL

Constraints:

  • Must contain 1 to 8 alphanumeric characters.

  • Must contain a letter.

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for Db2

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.

" + "documentation":"

The meaning of this parameter differs according to the database engine you use.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

  • Must contain 1 to 64 alphanumeric characters.

  • Can't be a word reserved by the database engine.

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • It must contain 1 to 63 alphanumeric characters.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

Default: ORCL

Constraints:

  • Must contain 1 to 8 alphanumeric characters.

  • Must contain a letter.

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for Db2

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.

" }, "DBInstanceIdentifier":{ "shape":"String", @@ -4478,7 +4483,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

The license model information for this DB instance.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license

  • RDS for MariaDB - general-public-license

  • RDS for Microsoft SQL Server - license-included

  • RDS for MySQL - general-public-license

  • RDS for Oracle - bring-your-own-license | license-included

  • RDS for PostgreSQL - postgresql-license

" + "documentation":"

The license model information for this DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

The default for RDS for Db2 is bring-your-own-license.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license

  • RDS for MariaDB - general-public-license

  • RDS for Microsoft SQL Server - license-included

  • RDS for MySQL - general-public-license

  • RDS for Oracle - bring-your-own-license | license-included

  • RDS for PostgreSQL - postgresql-license

" }, "Iops":{ "shape":"IntegerOptional", @@ -4498,7 +4503,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.

  • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.

  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.

" + "documentation":"

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.

  • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.

  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.

" }, "Tags":{ "shape":"TagList", @@ -4647,6 +4652,10 @@ "MultiTenant":{ "shape":"BooleanOptional", "documentation":"

Specifies whether to use the multi-tenant configuration or the single-tenant configuration (default). This parameter only applies to RDS for Oracle container database (CDB) engines.

Note the following restrictions:

  • The DB engine that you specify in the request must support the multi-tenant configuration. If you attempt to enable the multi-tenant configuration on a DB engine that doesn't support it, the request fails.

  • If you specify the multi-tenant configuration when you create your DB instance, you can't later modify this DB instance to use the single-tenant configuration.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -5032,6 +5041,10 @@ "shape":"DoubleOptional", "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" }, + "MinACU":{ + "shape":"DoubleOptional", + "documentation":"

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" + }, "PubliclyAccessible":{ "shape":"BooleanOptional", "documentation":"

Specifies whether the DB shard group is publicly accessible.

When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it.

When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB shard group is private.

  • If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB shard group is private.

  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public.

" @@ -5113,7 +5126,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy

" + "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment

" }, "EventCategories":{ "shape":"EventCategoriesList", @@ -5156,6 +5169,10 @@ "shape":"String", "documentation":"

The engine version to use for this global database cluster.

Constraints:

  • Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.

" }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this global database cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the global cluster will fail if the DB major version is past its end of standard support date.

This setting only applies to Aurora PostgreSQL-based global databases.

You can use this setting to enroll your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon Aurora User Guide.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + }, "DeletionProtection":{ "shape":"BooleanOptional", "documentation":"

Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.

" @@ -5630,7 +5647,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Indicates whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

For more information, see CreateDBCluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "documentation":"

Indicates whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

For more information, see CreateDBCluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

" }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", @@ -5689,7 +5706,11 @@ "shape":"IntegerOptional", "documentation":"

The storage throughput for the DB cluster. The throughput is automatically set based on the IOPS that you provision, and is not configurable.

This setting is only for non-Aurora Multi-AZ DB clusters.

" }, - "CertificateDetails":{"shape":"CertificateDetails"} + "CertificateDetails":{"shape":"CertificateDetails"}, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for the DB cluster.

For more information, see CreateDBCluster.

" + } }, "documentation":"

Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.

For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", "wrapper":true @@ -6778,7 +6799,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

The license model information for this DB instance. This setting doesn't apply to RDS Custom DB instances.

" + "documentation":"

The license model information for this DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

" }, "Iops":{ "shape":"IntegerOptional", @@ -6802,7 +6823,7 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"

Indicates whether the DB instance is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" + "documentation":"

Indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "StatusInfos":{ "shape":"DBInstanceStatusInfoList", @@ -7004,6 +7025,10 @@ "MultiTenant":{ "shape":"BooleanOptional", "documentation":"

Specifies whether the DB instance is in the multi-tenant configuration (TRUE) or the single-tenant configuration (FALSE).

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for the DB instance.

For more information, see CreateDBInstance.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.

", @@ -8086,6 +8111,10 @@ "shape":"DoubleOptional", "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" }, + "MinACU":{ + "shape":"DoubleOptional", + "documentation":"

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" + }, "ComputeRedundancy":{ "shape":"IntegerOptional", "documentation":"

Specifies whether to create standby instances for the DB shard group. Valid values are the following:

  • 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.

  • 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.

  • 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.

" @@ -8684,11 +8713,11 @@ }, "SkipFinalSnapshot":{ "shape":"Boolean", - "documentation":"

Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By default, this parameter is disabled.

You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled.

" + "documentation":"

Specifies whether to skip the creation of a final DB cluster snapshot before RDS deletes the DB cluster. If you set this value to true, RDS doesn't create a final DB cluster snapshot. If you set this value to false or don't specify it, RDS creates a DB cluster snapshot before it deletes the DB cluster. By default, this parameter is disabled, so RDS creates a final DB cluster snapshot.

If SkipFinalSnapshot is disabled, you must specify a value for the FinalDBSnapshotIdentifier parameter.

" }, "FinalDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled.

Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter results in an error.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

" + "documentation":"

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled.

If you specify this parameter and also skip the creation of a final DB cluster snapshot with the SkipFinalSnapshot parameter, the request results in an error.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

" }, "DeleteAutomatedBackups":{ "shape":"BooleanOptional", @@ -11051,6 +11080,10 @@ "shape":"String", "documentation":"

Indicates the database engine version.

" }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for the global cluster.

For more information, see CreateGlobalCluster.

" + }, "DatabaseName":{ "shape":"String", "documentation":"

The default database name within the new global database cluster.

" @@ -12538,7 +12571,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB instance is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" + "documentation":"

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" }, "MonitoringRoleArn":{ "shape":"String", @@ -12816,6 +12849,10 @@ "MaxACU":{ "shape":"DoubleOptional", "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" + }, + "MinACU":{ + "shape":"DoubleOptional", + "documentation":"

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" } } }, @@ -12917,7 +12954,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy

" + "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment

" }, "EventCategories":{ "shape":"EventCategoriesList", @@ -13121,18 +13158,18 @@ }, "DBSecurityGroupMemberships":{ "shape":"DBSecurityGroupNameList", - "documentation":"

A list of DBSecurityGroupMembership name strings used for this option.

" + "documentation":"

A list of DB security groups used for this option.

" }, "VpcSecurityGroupMemberships":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of VpcSecurityGroupMembership name strings used for this option.

" + "documentation":"

A list of VPC security group names used for this option.

" }, "OptionSettings":{ "shape":"OptionSettingsList", "documentation":"

The option settings to include in an option group.

" } }, - "documentation":"

A list of all available options

" + "documentation":"

A list of all available options for an option group.

" }, "OptionConfigurationList":{ "type":"list", @@ -13768,7 +13805,7 @@ "members":{ "Action":{ "shape":"String", - "documentation":"

The type of pending maintenance action that is available for the resource. Valid actions are system-update, db-upgrade, hardware-maintenance, and ca-certificate-rotation.

" + "documentation":"

The type of pending maintenance action that is available for the resource.

For more information about maintenance actions, see Maintaining a DB instance.

Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation

" }, "AutoAppliedAfterDate":{ "shape":"TStamp", @@ -13987,10 +14024,10 @@ }, "Value":{ "shape":"String", - "documentation":"

The value of a processor feature name.

" + "documentation":"

The value of a processor feature.

" } }, - "documentation":"

Contains the processor features of a DB instance class.

To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter.

You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:

  • CreateDBInstance

  • ModifyDBInstance

  • RestoreDBInstanceFromDBSnapshot

  • RestoreDBInstanceFromS3

  • RestoreDBInstanceToPointInTime

You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter.

In addition, you can use the following actions for DB instance class processor information:

  • DescribeDBInstances

  • DescribeDBSnapshots

  • DescribeValidDBInstanceModifications

If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met:

  • You are accessing an Oracle DB instance.

  • Your Oracle DB instance class supports configuring the number of CPU cores and threads per core.

  • The current number CPU cores and threads is set to a non-default value.

For more information, see Configuring the Processor of the DB Instance Class in the Amazon RDS User Guide.

" + "documentation":"

Contains the processor features of a DB instance class.

To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter.

You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:

  • CreateDBInstance

  • ModifyDBInstance

  • RestoreDBInstanceFromDBSnapshot

  • RestoreDBInstanceFromS3

  • RestoreDBInstanceToPointInTime

You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter.

In addition, you can use the following actions for DB instance class processor information:

  • DescribeDBInstances

  • DescribeDBSnapshots

  • DescribeValidDBInstanceModifications

If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met:

  • You are accessing an Oracle DB instance.

  • Your Oracle DB instance class supports configuring the number of CPU cores and threads per core.

  • The current number of CPU cores and threads is set to a non-default value.

For more information, see Configuring the processor for a DB instance class in RDS for Oracle in the Amazon RDS User Guide.

" }, "ProcessorFeatureList":{ "type":"list", @@ -14876,6 +14913,10 @@ "StorageType":{ "shape":"String", "documentation":"

Specifies the storage type to be associated with the DB cluster.

Valid Values: aurora, aurora-iopt1

Default: aurora

Valid for: Aurora DB clusters only

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } } }, @@ -15005,6 +15046,10 @@ "RdsCustomClusterConfiguration":{ "shape":"RdsCustomClusterConfiguration", "documentation":"

Reserved for future use.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -15128,6 +15173,10 @@ "RdsCustomClusterConfiguration":{ "shape":"RdsCustomClusterConfiguration", "documentation":"

Reserved for future use.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -15180,7 +15229,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

License model information for the restored DB instance.

This setting doesn't apply to RDS Custom.

Default: Same as source.

Valid Values: license-included | bring-your-own-license | general-public-license

" + "documentation":"

License model information for the restored DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license

  • RDS for MariaDB - general-public-license

  • RDS for Microsoft SQL Server - license-included

  • RDS for MySQL - general-public-license

  • RDS for Oracle - bring-your-own-license | license-included

  • RDS for PostgreSQL - postgresql-license

Default: Same as the source.

" }, "DBName":{ "shape":"String", @@ -15302,6 +15351,10 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -15466,7 +15519,7 @@ }, "S3IngestionRoleArn":{ "shape":"String", - "documentation":"

An Amazon Web Services Identity and Access Management (IAM) role to allow Amazon RDS to access your Amazon S3 bucket.

" + "documentation":"

An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. For information about this role, see Creating an IAM role manually in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -15523,6 +15576,10 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } } }, @@ -15582,7 +15639,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

The license model information for the restored DB instance.

This setting doesn't apply to RDS Custom.

Valid Values: license-included | bring-your-own-license | general-public-license

Default: Same as the source.

" + "documentation":"

The license model information for the restored DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license

  • RDS for MariaDB - general-public-license

  • RDS for Microsoft SQL Server - license-included

  • RDS for MySQL - general-public-license

  • RDS for Oracle - bring-your-own-license | license-included

  • RDS for PostgreSQL - postgresql-license

Default: Same as the source.

" }, "DBName":{ "shape":"String", @@ -15712,6 +15769,10 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" + }, + "EngineLifecycleSupport":{ + "shape":"String", + "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" } }, "documentation":"

" @@ -16520,7 +16581,7 @@ "documentation":"

A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws: or rds:. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

" } }, - "documentation":"

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" + "documentation":"

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.

" }, "TagList":{ "type":"list", @@ -16528,7 +16589,7 @@ "shape":"Tag", "locationName":"Tag" }, - "documentation":"

A list of tags. For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" + "documentation":"

A list of tags.

For more information, see Tagging Amazon RDS resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS resources in the Amazon Aurora User Guide.

" }, "TagListMessage":{ "type":"structure", @@ -16789,7 +16850,7 @@ }, "AutoUpgrade":{ "shape":"Boolean", - "documentation":"

Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

" + "documentation":"

Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

This parameter is dynamic, and is set by RDS.

" }, "IsMajorVersionUpgrade":{ "shape":"Boolean", diff --git a/botocore/data/redshift-serverless/2021-04-21/service-2.json b/botocore/data/redshift-serverless/2021-04-21/service-2.json index 170fcd5185..26a91f59bb 100644 --- a/botocore/data/redshift-serverless/2021-04-21/service-2.json +++ b/botocore/data/redshift-serverless/2021-04-21/service-2.json @@ -172,7 +172,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"TooManyTagsException"} + {"shape":"TooManyTagsException"}, + {"shape":"Ipv6CidrBlockNotFoundException"} ], "documentation":"

Creates a workgroup in Amazon Redshift Serverless.

", "idempotent":true @@ -916,7 +917,8 @@ {"shape":"InsufficientCapacityException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"Ipv6CidrBlockNotFoundException"} ], "documentation":"

Updates a workgroup with the specified configuration settings. You can't update multiple parameters in one request. For example, you can update baseCapacity or port in a single request, but you can't update both in the same request.

" } @@ -1200,7 +1202,7 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide

" + "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide

" }, "schedule":{ "shape":"Schedule", @@ -1383,6 +1385,10 @@ "shape":"Boolean", "documentation":"

The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet.

" }, + "ipAddressType":{ + "shape":"IpAddressType", + "documentation":"

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + }, "maxCapacity":{ "shape":"Integer", "documentation":"

The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.

" @@ -1999,6 +2005,19 @@ "documentation":"

The provided pagination token is invalid.

", "exception":true }, + "IpAddressType":{ + "type":"string", + "pattern":"^(ipv4|dualstack)$" + }, + "Ipv6CidrBlockNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

There are no subnets in your VPC with associated IPv6 CIDR blocks. To use dual-stack mode, associate an IPv6 CIDR block with each subnet in your VPC.

", + "exception":true + }, "KmsKeyId":{"type":"string"}, "ListCustomDomainAssociationsRequest":{ "type":"structure", @@ -2538,6 +2557,10 @@ "shape":"String", "documentation":"

The Availability Zone.

" }, + "ipv6Address":{ + "shape":"String", + "documentation":"

The IPv6 address of the network interface within the subnet.

" + }, "networkInterfaceId":{ "shape":"String", "documentation":"

The unique identifier of the network interface.

" @@ -2925,7 +2948,7 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide

" + "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide

" }, "schedule":{ "shape":"Schedule", @@ -3461,7 +3484,7 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide

" + "documentation":"

The ARN of the IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift Serverless API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management Guide

" }, "schedule":{ "shape":"Schedule", @@ -3581,6 +3604,10 @@ "shape":"Boolean", "documentation":"

The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.

" }, + "ipAddressType":{ + "shape":"IpAddressType", + "documentation":"

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + }, "maxCapacity":{ "shape":"Integer", "documentation":"

The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.

" @@ -3777,6 +3804,10 @@ "shape":"Boolean", "documentation":"

The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.

" }, + "ipAddressType":{ + "shape":"IpAddressType", + "documentation":"

The IP address type that the workgroup supports. Possible values are ipv4 and dualstack.

" + }, "maxCapacity":{ "shape":"Integer", "documentation":"

The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.

" diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 8f13bcdb4f..3848efdce7 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -4,11 +4,13 @@ "apiVersion":"2012-12-01", "endpointPrefix":"redshift", "protocol":"query", + "protocols":["query"], "serviceFullName":"Amazon Redshift", "serviceId":"Redshift", "signatureVersion":"v4", "uid":"redshift-2012-12-01", - "xmlNamespace":"http://redshift.amazonaws.com/doc/2012-12-01/" + "xmlNamespace":"http://redshift.amazonaws.com/doc/2012-12-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptReservedNodeExchange":{ @@ -2268,7 +2270,7 @@ {"shape":"DependentServiceUnavailableFault"}, {"shape":"ReservedNodeAlreadyExistsFault"} ], - "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc1.large (if your cluster is in a VPC)

    • dc1.8xlarge (if your cluster is in a VPC)

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.xlplus

    • ra3.4xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

" + "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ra3.xlplus

    • ra3.4xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

" }, "RestoreFromClusterSnapshot":{ "name":"RestoreFromClusterSnapshot", @@ -4083,7 +4085,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" + "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" }, "MasterUsername":{ "shape":"String", @@ -4127,7 +4129,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the cluster accepts incoming connections.

The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

Default: 5439

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)

  • For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.

" + "documentation":"

The port number on which the cluster accepts incoming connections.

The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

Default: 5439

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)

  • For clusters with dc2 nodes - Select a port within the range 1150-65535.

" }, "ClusterVersion":{ "shape":"String", @@ -8122,7 +8124,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" + "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" }, "NumberOfNodes":{ "shape":"IntegerOptional", @@ -8210,7 +8212,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The option to change the port of an Amazon Redshift cluster.

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)

  • For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.

" + "documentation":"

The option to change the port of an Amazon Redshift cluster.

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)

  • For clusters with dc2 nodes - Select a port within the range 1150-65535.

" }, "ManageMasterPassword":{ "shape":"BooleanOptional", @@ -8598,7 +8600,7 @@ "members":{ "NodeType":{ "shape":"String", - "documentation":"

The node type, such as, \"ds2.8xlarge\".

" + "documentation":"

The node type, such as, \"ra3.4xlarge\".

" }, "NumberOfNodes":{ "shape":"Integer", @@ -9458,7 +9460,7 @@ }, "SourceReservedNodeType":{ "shape":"String", - "documentation":"

The source reserved-node type, for example ds2.xlarge.

" + "documentation":"

The source reserved-node type, for example ra3.4xlarge.

" }, "SourceReservedNodeCount":{ "shape":"Integer", @@ -9837,7 +9839,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the cluster accepts connections.

Default: The same port as the original cluster.

Valid values: For clusters with ds2 or dc2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.

" + "documentation":"

The port number on which the cluster accepts connections.

Default: The same port as the original cluster.

Valid values: For clusters with DC2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.

" }, "AvailabilityZone":{ "shape":"String", @@ -9901,7 +9903,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

The node type that the restored cluster will be provisioned with.

If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc2.large node type into another dc2 type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" }, "EnhancedVpcRouting":{ "shape":"BooleanOptional", @@ -9985,23 +9987,23 @@ }, "CurrentRestoreRateInMegaBytesPerSecond":{ "shape":"Double", - "documentation":"

The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.

" + "documentation":"

The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 node types.

" }, "SnapshotSizeInMegaBytes":{ "shape":"Long", - "documentation":"

The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.

" + "documentation":"

The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 node types.

" }, "ProgressInMegaBytes":{ "shape":"Long", - "documentation":"

The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.

" + "documentation":"

The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 node types.

" }, "ElapsedTimeInSeconds":{ "shape":"Long", - "documentation":"

The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.

" + "documentation":"

The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 node types.

" }, "EstimatedTimeToCompletionInSeconds":{ "shape":"Long", - "documentation":"

The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.

" + "documentation":"

The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 node types.

" } }, "documentation":"

Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot.

" @@ -10271,7 +10273,7 @@ }, "TargetAction":{ "shape":"ScheduledActionType", - "documentation":"

A JSON format string of the Amazon Redshift API operation with input parameters.

\"{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}\".

" + "documentation":"

A JSON format string of the Amazon Redshift API operation with input parameters.

\"{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ra3.4xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}\".

" }, "Schedule":{ "shape":"String", diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index 85a7deee3a..45455d20ec 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"rekognition", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Rekognition", "serviceId":"Rekognition", "signatureVersion":"v4", "targetPrefix":"RekognitionService", - "uid":"rekognition-2016-06-27" + "uid":"rekognition-2016-06-27", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFaces":{ @@ -750,7 +752,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the label detection results of a Amazon Rekognition Video analysis started by StartLabelDetection.

The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter. If there is no NAME specified, the default sort is by timestamp.

You can select how results are aggregated by using the AggregateBy input parameter. The default aggregation method is TIMESTAMPS. You can also aggregate by SEGMENTS, which aggregates all instances of labels detected in a given segment.

The returned Labels array may include the following attributes:

  • Name - The name of the detected label.

  • Confidence - The level of confidence in the label assigned to a detected object.

  • Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

  • Aliases - Possible Aliases for the label.

  • Categories - The label categories that the detected label belongs to.

  • BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.

  • Timestamp - Time, in milliseconds from the start of the video, that the label was detected. For aggregation by SEGMENTS, the StartTimestampMillis, EndTimestampMillis, and DurationMillis structures are what define a segment. Although the “Timestamp” structure is still returned with each label, its value is set to be the same as StartTimestampMillis.

Timestamp and Bounding box information are returned for detected Instances, only if aggregation is done by TIMESTAMPS. If aggregating by SEGMENTS, information about detected instances isn’t returned.

The version of the label model used for the detection is also returned.

Note DominantColors isn't returned for Instances, although it is shown as part of the response in the sample seen below.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

" + "documentation":"

Gets the label detection results of an Amazon Rekognition Video analysis started by StartLabelDetection.

The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartLabelDetection.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter. If there is no NAME specified, the default sort is by timestamp.

You can select how results are aggregated by using the AggregateBy input parameter. The default aggregation method is TIMESTAMPS. You can also aggregate by SEGMENTS, which aggregates all instances of labels detected in a given segment.

The returned Labels array may include the following attributes:

  • Name - The name of the detected label.

  • Confidence - The level of confidence in the label assigned to a detected object.

  • Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

  • Aliases - Possible Aliases for the label.

  • Categories - The label categories that the detected label belongs to.

  • BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.

  • Timestamp - Time, in milliseconds from the start of the video, that the label was detected. For aggregation by SEGMENTS, the StartTimestampMillis, EndTimestampMillis, and DurationMillis structures are what define a segment. Although the “Timestamp” structure is still returned with each label, its value is set to be the same as StartTimestampMillis.

Timestamp and Bounding box information are returned for detected Instances, only if aggregation is done by TIMESTAMPS. If aggregating by SEGMENTS, information about detected instances isn’t returned.

The version of the label model used for the detection is also returned.

Note DominantColors isn't returned for Instances, although it is shown as part of the response in the sample seen below.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetLabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

If you are retrieving results while using the Amazon Simple Notification Service, note that you will receive an \"ERROR\" notification if the job encounters an issue.

" }, "GetMediaAnalysisJob":{ "name":"GetMediaAnalysisJob", @@ -2204,6 +2206,10 @@ "ProjectArn":{ "shape":"ProjectArn", "documentation":"

The ARN of the Amazon Rekognition Custom Labels project to which you want to asssign the dataset.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

A set of tags (key-value pairs) that you want to attach to the dataset.

" } } }, @@ -2272,6 +2278,10 @@ "AutoUpdate":{ "shape":"ProjectAutoUpdate", "documentation":"

Specifies whether automatic retraining should be attempted for the versions of the project. Automatic retraining is done as a best effort. Required argument for Content Moderation. Applicable only to adapters.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

A set of tags (key-value pairs) that you want to attach to the project.

" } } }, @@ -3283,7 +3293,7 @@ "members":{ "ModerationLabels":{ "shape":"ModerationLabels", - "documentation":"

Array of detected Moderation labels and the time, in milliseconds from the start of the video, they were detected.

" + "documentation":"

Array of detected Moderation labels. For video operations, this includes the time, in milliseconds from the start of the video, they were detected.

" }, "ModerationModelVersion":{ "shape":"String", diff --git a/botocore/data/resiliencehub/2020-04-30/paginators-1.json b/botocore/data/resiliencehub/2020-04-30/paginators-1.json index 45c746174d..f98356da47 100644 --- a/botocore/data/resiliencehub/2020-04-30/paginators-1.json +++ b/botocore/data/resiliencehub/2020-04-30/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "resourceDrifts" + }, + "ListResourceGroupingRecommendations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "groupingRecommendations" } } } diff --git a/botocore/data/resiliencehub/2020-04-30/service-2.json b/botocore/data/resiliencehub/2020-04-30/service-2.json index 9c465d0eec..4bb1d5776a 100644 --- a/botocore/data/resiliencehub/2020-04-30/service-2.json +++ b/botocore/data/resiliencehub/2020-04-30/service-2.json @@ -10,9 +10,28 @@ "serviceId":"resiliencehub", "signatureVersion":"v4", "signingName":"resiliencehub", - "uid":"resiliencehub-2020-04-30" + "uid":"resiliencehub-2020-04-30", + "auth":["aws.auth#sigv4"] }, "operations":{ + "AcceptResourceGroupingRecommendations":{ + "name":"AcceptResourceGroupingRecommendations", + "http":{ + "method":"POST", + "requestUri":"/accept-resource-grouping-recommendations", + "responseCode":200 + }, + "input":{"shape":"AcceptResourceGroupingRecommendationsRequest"}, + "output":{"shape":"AcceptResourceGroupingRecommendationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Accepts the resource grouping recommendations suggested by Resilience Hub for your application.

" + }, "AddDraftAppVersionResourceMappings":{ "name":"AddDraftAppVersionResourceMappings", "http":{ @@ -26,11 +45,12 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources suported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide.

" + "documentation":"

Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of resources supported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide.

" }, "BatchUpdateRecommendationStatus":{ "name":"BatchUpdateRecommendationStatus", @@ -370,7 +390,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Describes a resource of the Resilience Hub application.

This API accepts only one of the following parameters to descibe the resource:

  • resourceName

  • logicalResourceId

  • physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion)

" + "documentation":"

Describes a resource of the Resilience Hub application.

This API accepts only one of the following parameters to describe the resource:

  • resourceName

  • logicalResourceId

  • physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion)

" }, "DescribeAppVersionResourcesResolutionStatus":{ "name":"DescribeAppVersionResourcesResolutionStatus", @@ -444,6 +464,24 @@ ], "documentation":"

Describes a specified resiliency policy for an Resilience Hub application. The returned policy object includes creation time, data location constraints, the Amazon Resource Name (ARN) for the policy, tags, tier, and more.

" }, + "DescribeResourceGroupingRecommendationTask":{ + "name":"DescribeResourceGroupingRecommendationTask", + "http":{ + "method":"POST", + "requestUri":"/describe-resource-grouping-recommendation-task", + "responseCode":200 + }, + "input":{"shape":"DescribeResourceGroupingRecommendationTaskRequest"}, + "output":{"shape":"DescribeResourceGroupingRecommendationTaskResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the resource grouping recommendation tasks run by Resilience Hub for your application.

" + }, "ImportResourcesToDraftAppVersion":{ "name":"ImportResourcesToDraftAppVersion", "http":{ @@ -713,6 +751,24 @@ ], "documentation":"

Lists the resiliency policies for the Resilience Hub applications.

" }, + "ListResourceGroupingRecommendations":{ + "name":"ListResourceGroupingRecommendations", + "http":{ + "method":"GET", + "requestUri":"/list-resource-grouping-recommendations", + "responseCode":200 + }, + "input":{"shape":"ListResourceGroupingRecommendationsRequest"}, + "output":{"shape":"ListResourceGroupingRecommendationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists the resource grouping recommendations suggested by Resilience Hub for your application.

" + }, "ListSopRecommendations":{ "name":"ListSopRecommendations", "http":{ @@ -844,6 +900,24 @@ ], "documentation":"

Adds or updates the app template for an Resilience Hub application draft version.

" }, + "RejectResourceGroupingRecommendations":{ + "name":"RejectResourceGroupingRecommendations", + "http":{ + "method":"POST", + "requestUri":"/reject-resource-grouping-recommendations", + "responseCode":200 + }, + "input":{"shape":"RejectResourceGroupingRecommendationsRequest"}, + "output":{"shape":"RejectResourceGroupingRecommendationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Rejects resource grouping recommendations.

" + }, "RemoveDraftAppVersionResourceMappings":{ "name":"RemoveDraftAppVersionResourceMappings", "http":{ @@ -902,6 +976,25 @@ ], "documentation":"

Creates a new application assessment for an application.

" }, + "StartResourceGroupingRecommendationTask":{ + "name":"StartResourceGroupingRecommendationTask", + "http":{ + "method":"POST", + "requestUri":"/start-resource-grouping-recommendation-task", + "responseCode":200 + }, + "input":{"shape":"StartResourceGroupingRecommendationTaskRequest"}, + "output":{"shape":"StartResourceGroupingRecommendationTaskResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts grouping recommendation task.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -1036,6 +1129,57 @@ } }, "shapes":{ + "AcceptGroupingRecommendationEntries":{ + "type":"list", + "member":{"shape":"AcceptGroupingRecommendationEntry"}, + "max":30, + "min":1 + }, + "AcceptGroupingRecommendationEntry":{ + "type":"structure", + "required":["groupingRecommendationId"], + "members":{ + "groupingRecommendationId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation.

" + } + }, + "documentation":"

Indicates the grouping recommendation you have accepted to include in your application.

" + }, + "AcceptResourceGroupingRecommendationsRequest":{ + "type":"structure", + "required":[ + "appArn", + "entries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "entries":{ + "shape":"AcceptGroupingRecommendationEntries", + "documentation":"

Indicates the list of resource grouping recommendations you want to include in your application.

" + } + } + }, + "AcceptResourceGroupingRecommendationsResponse":{ + "type":"structure", + "required":[ + "appArn", + "failedEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "failedEntries":{ + "shape":"FailedGroupingRecommendationEntries", + "documentation":"

Indicates the list of resource grouping recommendations that could not be included in your application.

" + } + } + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -1327,6 +1471,10 @@ "shape":"TimeStamp", "documentation":"

Starting time for the action.

" }, + "summary":{ + "shape":"AssessmentSummary", + "documentation":"

Indicates a concise summary that provides an overview of the Resilience Hub assessment.

" + }, "tags":{ "shape":"TagMap", "documentation":"

Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

" @@ -1374,7 +1522,7 @@ }, "complianceStatus":{ "shape":"ComplianceStatus", - "documentation":"

TCurrent status of compliance for the resiliency policy.

" + "documentation":"

Current status of compliance for the resiliency policy.

" }, "cost":{ "shape":"Cost", @@ -1421,7 +1569,9 @@ "PolicyBreached", "PolicyMet", "NotAssessed", - "ChangesDetected" + "ChangesDetected", + "NotApplicable", + "MissingPolicy" ] }, "AppComponent":{ @@ -1436,11 +1586,11 @@ "documentation":"

Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

Key: \"failover-regions\"

Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

" }, "id":{ - "shape":"String255", + "shape":"EntityName255", "documentation":"

Identifier of the Application Component.

" }, "name":{ - "shape":"String255", + "shape":"EntityName255", "documentation":"

Name of the Application Component.

" }, "type":{ @@ -1654,6 +1804,28 @@ "System" ] }, + "AssessmentRiskRecommendation":{ + "type":"structure", + "members":{ + "appComponents":{ + "shape":"AppComponentNameList", + "documentation":"

Indicates the Application Components (AppComponents) that were assessed as part of the assessment and are associated with the identified risk and recommendation.

This property is available only in the US East (N. Virginia) Region.

" + }, + "recommendation":{ + "shape":"String255", + "documentation":"

Indicates the recommendation provided by the Resilience Hub to address the identified risks in the application.

This property is available only in the US East (N. Virginia) Region.

" + }, + "risk":{ + "shape":"String255", + "documentation":"

Indicates the description of the potential risk identified in the application as part of the Resilience Hub assessment.

This property is available only in the US East (N. Virginia) Region.

" + } + }, + "documentation":"

Indicates a specific risk identified in the Resilience Hub assessment and the corresponding recommendation provided to address that risk.

The assessment summary generated by large language models (LLMs) on Amazon Bedrock are only suggestions. The current level of generative AI technology is not perfect and LLMs are not infallible. Bias and incorrect answers, although rare, should be expected. Review each recommendation in the assessment summary before you use the output from an LLM.

This property is available only in the US East (N. Virginia) Region.

" + }, + "AssessmentRiskRecommendationList":{ + "type":"list", + "member":{"shape":"AssessmentRiskRecommendation"} + }, "AssessmentStatus":{ "type":"string", "enum":[ @@ -1669,6 +1841,20 @@ "max":10, "min":1 }, + "AssessmentSummary":{ + "type":"structure", + "members":{ + "riskRecommendations":{ + "shape":"AssessmentRiskRecommendationList", + "documentation":"

Indicates the top risks and recommendations identified by the Resilience Hub assessment, each representing a specific risk and the corresponding recommendation to address it.

This property is available only in the US East (N. Virginia) Region.

" + }, + "summary":{ + "shape":"String500", + "documentation":"

Indicates a concise summary that provides an overview of the Resilience Hub assessment.

This property is available only in the US East (N. Virginia) Region.

" + } + }, + "documentation":"

Indicates the AI-generated summary for the Resilience Hub assessment, providing a concise overview that highlights the top risks and recommendations.

This property is available only in the US East (N. Virginia) Region.

" + }, "AwsRegion":{ "type":"string", "pattern":"^[a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]$" @@ -1801,7 +1987,7 @@ }, "diffType":{ "shape":"DifferenceType", - "documentation":"

Difference type between actual and expected recovery point objective (RPO) and recovery time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

" + "documentation":"

Difference type between actual and expected recovery point objective (RPO) and recovery time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

" }, "driftType":{ "shape":"DriftType", @@ -1834,7 +2020,9 @@ "type":"string", "enum":[ "PolicyBreached", - "PolicyMet" + "PolicyMet", + "NotApplicable", + "MissingPolicy" ] }, "ComponentCompliancesList":{ @@ -2870,6 +3058,41 @@ } } }, + "DescribeResourceGroupingRecommendationTaskRequest":{ + "type":"structure", + "required":["appArn"], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "groupingId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation task.

" + } + } + }, + "DescribeResourceGroupingRecommendationTaskResponse":{ + "type":"structure", + "required":[ + "groupingId", + "status" + ], + "members":{ + "errorMessage":{ + "shape":"String500", + "documentation":"

Indicates the error that occurred while generating a grouping recommendation.

" + }, + "groupingId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation task.

" + }, + "status":{ + "shape":"ResourcesGroupingRecGenStatusType", + "documentation":"

Status of the action.

" + } + } + }, "DifferenceType":{ "type":"string", "enum":[ @@ -3028,6 +3251,10 @@ "type":"string", "pattern":"^[A-Za-z0-9][A-Za-z0-9_\\-]{1,59}$" }, + "EntityName255":{ + "type":"string", + "pattern":"^[A-Za-z0-9][A-Za-z0-9_\\-]{0,254}$" + }, "EntityNameList":{ "type":"list", "member":{"shape":"EntityName"} @@ -3093,6 +3320,28 @@ "ComplexityOfImplementation" ] }, + "FailedGroupingRecommendationEntries":{ + "type":"list", + "member":{"shape":"FailedGroupingRecommendationEntry"} + }, + "FailedGroupingRecommendationEntry":{ + "type":"structure", + "required":[ + "errorMessage", + "groupingRecommendationId" + ], + "members":{ + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Indicates the error that occurred while implementing a grouping recommendation.

" + }, + "groupingRecommendationId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation.

" + } + }, + "documentation":"

Indicates the accepted grouping recommendation whose implementation failed.

" + }, "FailurePolicy":{ "type":"structure", "required":[ @@ -3111,6 +3360,146 @@ }, "documentation":"

Defines a failure policy.

" }, + "GroupingAppComponent":{ + "type":"structure", + "required":[ + "appComponentId", + "appComponentName", + "appComponentType" + ], + "members":{ + "appComponentId":{ + "shape":"EntityName255", + "documentation":"

Indicates the identifier of an AppComponent.

" + }, + "appComponentName":{ + "shape":"EntityName255", + "documentation":"

Indicates the name of an AppComponent.

" + }, + "appComponentType":{ + "shape":"String255", + "documentation":"

Indicates the type of an AppComponent.

" + } + }, + "documentation":"

Creates a new recommended Application Component (AppComponent).

" + }, + "GroupingRecommendation":{ + "type":"structure", + "required":[ + "confidenceLevel", + "creationTime", + "groupingAppComponent", + "groupingRecommendationId", + "recommendationReasons", + "resources", + "score", + "status" + ], + "members":{ + "confidenceLevel":{ + "shape":"GroupingRecommendationConfidenceLevel", + "documentation":"

Indicates the confidence level of Resilience Hub on the grouping recommendation.

" + }, + "creationTime":{ + "shape":"TimeStamp", + "documentation":"

Indicates the creation time of the grouping recommendation.

" + }, + "groupingAppComponent":{ + "shape":"GroupingAppComponent", + "documentation":"

Indicates the name of the recommended Application Component (AppComponent).

" + }, + "groupingRecommendationId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation.

" + }, + "recommendationReasons":{ + "shape":"String255List", + "documentation":"

Indicates all the reasons available for rejecting a grouping recommendation.

" + }, + "rejectionReason":{ + "shape":"GroupingRecommendationRejectionReason", + "documentation":"

Indicates the reason you had selected while rejecting a grouping recommendation.

" + }, + "resources":{ + "shape":"GroupingResourceList", + "documentation":"

Indicates the resources that are grouped in a recommended AppComponent.

" + }, + "score":{ + "shape":"Double", + "documentation":"

Indicates the score assigned to the grouping recommendation.

" + }, + "status":{ + "shape":"GroupingRecommendationStatusType", + "documentation":"

Indicates the status of grouping resources into AppComponents.

" + } + }, + "documentation":"

Creates a new grouping recommendation.

" + }, + "GroupingRecommendationConfidenceLevel":{ + "type":"string", + "enum":[ + "High", + "Medium" + ] + }, + "GroupingRecommendationList":{ + "type":"list", + "member":{"shape":"GroupingRecommendation"} + }, + "GroupingRecommendationRejectionReason":{ + "type":"string", + "enum":[ + "DistinctBusinessPurpose", + "SeparateDataConcern", + "DistinctUserGroupHandling", + "Other" + ] + }, + "GroupingRecommendationStatusType":{ + "type":"string", + "enum":[ + "Accepted", + "Rejected", + "PendingDecision" + ] + }, + "GroupingResource":{ + "type":"structure", + "required":[ + "logicalResourceId", + "physicalResourceId", + "resourceName", + "resourceType", + "sourceAppComponentIds" + ], + "members":{ + "logicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"

Indicates the logical identifier of the resource.

" + }, + "physicalResourceId":{ + "shape":"PhysicalResourceId", + "documentation":"

Indicates the physical identifier of the resource.

" + }, + "resourceName":{ + "shape":"String255", + "documentation":"

Indicates the resource name.

" + }, + "resourceType":{ + "shape":"String255", + "documentation":"

Indicates the resource type.

" + }, + "sourceAppComponentIds":{ + "shape":"String255List", + "documentation":"

Indicates the identifiers of the source AppComponents in which the resources were previously grouped.

" + } + }, + "documentation":"

Indicates the resource that will be grouped in the recommended Application Component (AppComponent).

" + }, + "GroupingResourceList":{ + "type":"list", + "member":{"shape":"GroupingResource"} + }, "HaArchitecture":{ "type":"string", "enum":[ @@ -3252,11 +3641,11 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Indicates the maximum number of applications requested.

" + "documentation":"

Indicates the maximum number of compliance drifts requested.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

" + "documentation":"

Null, or the token from a previous call to get the next set of results.

" } } }, @@ -3270,7 +3659,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

Token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

" + "documentation":"

Null, or the token from a previous call to get the next set of results.

" } } }, @@ -3690,7 +4079,7 @@ }, "reverseOrder":{ "shape":"BooleanOptional", - "documentation":"

The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending order. To sort the appliation list in descending order, set this field to True.

", + "documentation":"

The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending order. To sort the application list in descending order, set this field to True.

", "location":"querystring", "locationName":"reverseOrder" }, @@ -3813,6 +4202,43 @@ } } }, + "ListResourceGroupingRecommendationsRequest":{ + "type":"structure", + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

", + "location":"querystring", + "locationName":"appArn" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of grouping recommendations to be displayed per Resilience Hub application.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Null, or the token from a previous call to get the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListResourceGroupingRecommendationsResponse":{ + "type":"structure", + "required":["groupingRecommendations"], + "members":{ + "groupingRecommendations":{ + "shape":"GroupingRecommendationList", + "documentation":"

List of resource grouping recommendations generated by Resilience Hub.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Null, or the token from a previous call to get the next set of results.

" + } + } + }, "ListSopRecommendationsRequest":{ "type":"structure", "required":["assessmentArn"], @@ -4207,7 +4633,8 @@ "enum":[ "BreachedUnattainable", "BreachedCanMeet", - "MetCanImprove" + "MetCanImprove", + "MissingPolicy" ] }, "RecommendationDisruptionCompliance":{ @@ -4375,6 +4802,61 @@ "max":4, "min":1 }, + "RejectGroupingRecommendationEntries":{ + "type":"list", + "member":{"shape":"RejectGroupingRecommendationEntry"}, + "max":30, + "min":1 + }, + "RejectGroupingRecommendationEntry":{ + "type":"structure", + "required":["groupingRecommendationId"], + "members":{ + "groupingRecommendationId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation.

" + }, + "rejectionReason":{ + "shape":"GroupingRecommendationRejectionReason", + "documentation":"

Indicates the reason you had selected while rejecting a grouping recommendation.

" + } + }, + "documentation":"

Indicates the rejected grouping recommendation.

" + }, + "RejectResourceGroupingRecommendationsRequest":{ + "type":"structure", + "required":[ + "appArn", + "entries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "entries":{ + "shape":"RejectGroupingRecommendationEntries", + "documentation":"

Indicates the list of resource grouping recommendations you have selected to exclude from your application.

" + } + } + }, + "RejectResourceGroupingRecommendationsResponse":{ + "type":"structure", + "required":[ + "appArn", + "failedEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "failedEntries":{ + "shape":"FailedGroupingRecommendationEntries", + "documentation":"

Indicates the list of resource grouping recommendations that failed to get excluded in your application.

" + } + } + }, "RemoveDraftAppVersionResourceMappingsRequest":{ "type":"structure", "required":["appArn"], @@ -4625,7 +5107,7 @@ "members":{ "hasMoreErrors":{ "shape":"BooleanOptional", - "documentation":"

This indicates if there are more errors not listed in the resourceErrors list.

" + "documentation":"

This indicates if there are more errors not listed in the resourceErrors list.

" }, "resourceErrors":{ "shape":"ResourceErrorList", @@ -4765,6 +5247,15 @@ "type":"string", "pattern":".*" }, + "ResourcesGroupingRecGenStatusType":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Failed", + "Success" + ] + }, "RetryAfterSeconds":{ "type":"integer", "box":true @@ -4933,6 +5424,42 @@ } } }, + "StartResourceGroupingRecommendationTaskRequest":{ + "type":"structure", + "required":["appArn"], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + } + } + }, + "StartResourceGroupingRecommendationTaskResponse":{ + "type":"structure", + "required":[ + "appArn", + "groupingId", + "status" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide.

" + }, + "errorMessage":{ + "shape":"String500", + "documentation":"

Indicates the error that occurred while executing a grouping recommendation task.

" + }, + "groupingId":{ + "shape":"String255", + "documentation":"

Indicates the identifier of the grouping recommendation task.

" + }, + "status":{ + "shape":"ResourcesGroupingRecGenStatusType", + "documentation":"

Status of the action.

" + } + } + }, "String1024":{ "type":"string", "max":1024, diff --git a/botocore/data/rolesanywhere/2018-05-10/service-2.json b/botocore/data/rolesanywhere/2018-05-10/service-2.json index f8ce3a98ee..0e2891a73e 100644 --- a/botocore/data/rolesanywhere/2018-05-10/service-2.json +++ b/botocore/data/rolesanywhere/2018-05-10/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"rolesanywhere", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"IAM Roles Anywhere", "serviceId":"RolesAnywhere", "signatureVersion":"v4", "signingName":"rolesanywhere", - "uid":"rolesanywhere-2018-05-10" + "uid":"rolesanywhere-2018-05-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateProfile":{ @@ -537,6 +539,10 @@ "roleArns" ], "members":{ + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + }, "durationSeconds":{ "shape":"CreateProfileRequestDurationSecondsInteger", "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" @@ -1059,6 +1065,10 @@ "ProfileDetail":{ "type":"structure", "members":{ + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + }, "attributeMappings":{ "shape":"AttributeMappings", "documentation":"

A mapping applied to the authenticating end-entity certificate.

" @@ -1614,6 +1624,10 @@ "type":"structure", "required":["profileId"], "members":{ + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + }, "durationSeconds":{ "shape":"UpdateProfileRequestDurationSecondsInteger", "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index 0399620f94..fcb3fac736 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"route53", "globalEndpoint":"route53.amazonaws.com", "protocol":"rest-xml", + "protocols":["rest-xml"], "serviceAbbreviation":"Route 53", "serviceFullName":"Amazon Route 53", "serviceId":"Route 53", "signatureVersion":"v4", - "uid":"route53-2013-04-01" + "uid":"route53-2013-04-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "ActivateKeySigningKey":{ diff --git a/botocore/data/route53domains/2014-05-15/service-2.json b/botocore/data/route53domains/2014-05-15/service-2.json index 42162f1b41..ea75d18949 100644 --- a/botocore/data/route53domains/2014-05-15/service-2.json +++ b/botocore/data/route53domains/2014-05-15/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"route53domains", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Route 53 Domains", "serviceId":"Route 53 Domains", "signatureVersion":"v4", "targetPrefix":"Route53Domains_v20140515", - "uid":"route53domains-2014-05-15" + "uid":"route53domains-2014-05-15", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptDomainTransferFromAnotherAwsAccount":{ diff --git a/botocore/data/route53resolver/2018-04-01/service-2.json b/botocore/data/route53resolver/2018-04-01/service-2.json index 8291dda2c2..1b6a916072 100644 --- a/botocore/data/route53resolver/2018-04-01/service-2.json +++ b/botocore/data/route53resolver/2018-04-01/service-2.json @@ -11,7 +11,8 @@ "serviceId":"Route53Resolver", "signatureVersion":"v4", "targetPrefix":"Route53Resolver", - "uid":"route53resolver-2018-04-01" + "uid":"route53resolver-2018-04-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFirewallRuleGroup":{ diff --git a/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json 
b/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json index 66392c565c..1ef9810cda 100644 --- a/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json +++ b/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json @@ -68,6 +68,11 @@ "documentation": "The S3 Prefix used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Prefix.", "type": "String" }, + "CopySource": { + "required": false, + "documentation": "The Copy Source used for Copy Object request. This is an optional parameter that will be set automatically for operations that are scoped to Copy Source.", + "type": "String" + }, "DisableAccessPoints": { "required": false, "documentation": "Internal parameter to disable Access Point Buckets", diff --git a/botocore/data/s3/2006-03-01/paginators-1.json b/botocore/data/s3/2006-03-01/paginators-1.json index 15bc1131ae..b1b432071f 100644 --- a/botocore/data/s3/2006-03-01/paginators-1.json +++ b/botocore/data/s3/2006-03-01/paginators-1.json @@ -65,6 +65,12 @@ "limit_key": "MaxDirectoryBuckets", "output_token": "ContinuationToken", "result_key": "Buckets" + }, + "ListBuckets": { + "input_token": "ContinuationToken", + "limit_key": "MaxBuckets", + "output_token": "ContinuationToken", + "result_key": "Buckets" } } } diff --git a/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json b/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json index 67a92132a7..6ad51da4cb 100644 --- a/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json +++ b/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json @@ -2,6 +2,11 @@ "version": 1.0, "merge": { "pagination": { + "ListBuckets": { + "non_aggregate_keys": [ + "Owner" + ] + }, "ListMultipartUploads": { "non_aggregate_keys": [ "RequestCharged" diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 01c7cb9632..0e56e1c958 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ 
b/botocore/data/s3/2006-03-01/service-2.json @@ -11,7 +11,8 @@ "serviceFullName":"Amazon Simple Storage Service", "serviceId":"S3", "signatureVersion":"s3", - "uid":"s3-2006-03-01" + "uid":"s3-2006-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AbortMultipartUpload":{ @@ -27,7 +28,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to AbortMultipartUpload:

" + "documentation":"

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.

  • Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to AbortMultipartUpload:

" }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -52,7 +53,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

  • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. to keep the connection alive while we copy the data.

  • If the copy is successful, you receive a response with information about the copied object.

  • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

    • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

    • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CopyObject:

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

  • Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

  • VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

  • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

  • If the copy is successful, you receive a response with information about the copied object.

  • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

    • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

    • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-Region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CopyObject:

", "alias":"PutObjectCopy", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} @@ -313,7 +314,7 @@ "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", - "documentation":"

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

  • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

Directory buckets - MFA delete is not supported by directory buckets.

Permissions
  • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

    • s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.

    • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Content-MD5 request header
  • General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

  • Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object Delete requests.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to DeleteObjects:

", + "documentation":"

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

  • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

Directory buckets - MFA delete is not supported by directory buckets.

Permissions
  • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

    • s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.

    • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Content-MD5 request header
  • General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

  • Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object Delete requests.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to DeleteObjects:

", "alias":"DeleteMultipleObjects", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -759,7 +760,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", - "documentation":"

You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.

Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization

All HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

" + "documentation":"

You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.

Authentication and authorization

General purpose buckets - Request to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

" }, "HeadObject":{ "name":"HeadObject", @@ -773,7 +774,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions

  • General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.

    If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Encryption

Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

  • x-amz-server-side-encryption-customer-algorithm

  • x-amz-server-side-encryption-customer-key

  • x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

Versioning
  • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

  • If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

  • Directory buckets - Delete marker is not supported by directory buckets.

  • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following actions are related to HeadObject:

" + "documentation":"

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Permissions

  • General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.

    If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Encryption

Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

  • x-amz-server-side-encryption-customer-algorithm

  • x-amz-server-side-encryption-customer-key

  • x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

Versioning
  • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

  • If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

  • Directory buckets - Delete marker is not supported by directory buckets.

  • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

The following actions are related to HeadObject:

" }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -830,6 +831,7 @@ "method":"GET", "requestUri":"/" }, + "input":{"shape":"ListBucketsRequest"}, "output":{"shape":"ListBucketsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", "documentation":"

This operation is not supported by directory buckets.

Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets permission.

For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

", @@ -857,7 +859,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting of multipart uploads in response
  • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

    • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

  • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to ListMultipartUploads:

" + "documentation":"

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting of multipart uploads in response
  • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

    • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

  • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to ListMultipartUploads:

" }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -897,7 +899,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting order of returned objects
  • General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

  • Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.

The following operations are related to ListObjectsV2:

" + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

  • General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't return prefixes that are related only to in-progress multipart uploads.

  • Directory buckets - For directory buckets, ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting order of returned objects
  • General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

  • Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.

The following operations are related to ListObjectsV2:

" }, "ListParts":{ "name":"ListParts", @@ -979,7 +981,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This operation is not supported by directory buckets.

This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

The following operations are related to PutBucketEncryption:

", + "documentation":"

This operation is not supported by directory buckets.

This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.

Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

The following operations are related to PutBucketEncryption:

", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1193,7 +1195,7 @@ }, "input":{"shape":"PutBucketVersioningRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", - "documentation":"

This operation is not supported by directory buckets.

Sets the versioning state of an existing bucket.

You can set the versioning state with one of the following values:

Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

The following operations are related to PutBucketVersioning:

", + "documentation":"

This operation is not supported by directory buckets.

When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket.

Sets the versioning state of an existing bucket.

You can set the versioning state with one of the following values:

Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

The following operations are related to PutBucketVersioning:

", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1381,7 +1383,7 @@ "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation":"

Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization

All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have READ access to the source object and WRITE access to the destination bucket.

  • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.

    • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.

    For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object . By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Encryption
  • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

  • Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

Special errors
  • Error Code: NoSuchUpload

    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

    • HTTP Status Code: 404 Not Found

  • Error Code: InvalidRequest

    • Description: The specified copy source is not supported as a byte-range copy source.

    • HTTP Status Code: 400 Bad Request

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

", + "documentation":"

Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization

All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have READ access to the source object and WRITE access to the destination bucket.

  • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.

    • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.

    For information about permissions required to use the multipart upload API, see Multipart upload API and permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Encryption
  • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

  • Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

Special errors
  • Error Code: NoSuchUpload

    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

    • HTTP Status Code: 404 Not Found

  • Error Code: InvalidRequest

    • Description: The specified copy source is not supported as a byte-range copy source.

    • HTTP Status Code: 400 Bad Request

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} } @@ -1400,7 +1402,8 @@ }, "staticContextParams":{ "UseObjectLambdaEndpoint":{"value":true} - } + }, + "unsignedPayload":true } }, "shapes":{ @@ -2384,6 +2387,7 @@ "CopySource":{ "shape":"CopySource", "documentation":"

Specifies the source object for the copy operation. The source object can be up to 5 GB. If the source object is an object that was uploaded by using a multipart upload, the object copy will be a single part object after the source object is copied to the destination bucket.

You specify the value of the copy source in one of two formats, depending on whether you want to access the source object through an access point:

  • For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf from the general purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value must be URL-encoded. To copy the object reports/january.pdf from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf. The value must be URL-encoded.

  • For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

    • Amazon S3 supports copy operations using Access points only when the source and destination buckets are in the same Amazon Web Services Region.

    • Access points are not supported by directory buckets.

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

If your source bucket versioning is enabled, the x-amz-copy-source header by default identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId query parameter. Specifically, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

If you enable versioning on the destination bucket, Amazon S3 generates a unique version ID for the copied object. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the destination bucket, the version ID that Amazon S3 generates in the x-amz-version-id response header is always null.

Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

", + "contextParam":{"name":"CopySource"}, "location":"header", "locationName":"x-amz-copy-source" }, @@ -2444,6 +2448,7 @@ "Key":{ "shape":"ObjectKey", "documentation":"

The key of the destination object.

", + "contextParam":{"name":"Key"}, "location":"uri", "locationName":"Key" }, @@ -3025,7 +3030,7 @@ "members":{ "Credentials":{ "shape":"SessionCredentials", - "documentation":"

The established temporary security credentials for the created session..

", + "documentation":"

The established temporary security credentials for the created session.

", "locationName":"Credentials" } } @@ -3082,7 +3087,7 @@ "documentation":"

The number of years that you want to specify for the default retention period. Must be used with Mode.

" } }, - "documentation":"

The container element for specifying the default Object Lock retention settings for new objects placed in the specified bucket.

  • The DefaultRetention settings require both a mode and a period.

  • The DefaultRetention period can be either Days or Years but you must select one. You cannot specify Days and Years at the same time.

" + "documentation":"

The container element for optionally specifying the default Object Lock retention settings for new objects placed in the specified bucket.

  • The DefaultRetention settings require both a mode and a period.

  • The DefaultRetention period can be either Days or Years but you must select one. You cannot specify Days and Years at the same time.

" }, "Delete":{ "type":"structure", @@ -3714,7 +3719,7 @@ }, "EncodingType":{ "type":"string", - "documentation":"

Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key can contain any Unicode character; however, the XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.

", + "documentation":"

Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.

When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png will appear as test_file%283%29.png.

", "enum":["url"] }, "Encryption":{ @@ -3744,7 +3749,7 @@ "documentation":"

Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web Services KMS key stored in Amazon Web Services Key Management Service (KMS) for the destination bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

" } }, - "documentation":"

Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.

" + "documentation":"

Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.

If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.

" }, "End":{ "type":"long", @@ -5493,13 +5498,13 @@ }, "BucketRegion":{ "shape":"Region", - "documentation":"

The Region that the bucket is located.

This functionality is not supported for directory buckets.

", + "documentation":"

The Region that the bucket is located.

", "location":"header", "locationName":"x-amz-bucket-region" }, "AccessPointAlias":{ "shape":"AccessPointAlias", - "documentation":"

Indicates whether the bucket name used in the request is an access point alias.

This functionality is not supported for directory buckets.

", + "documentation":"

Indicates whether the bucket name used in the request is an access point alias.

For directory buckets, the value of this field is false.

", "location":"header", "locationName":"x-amz-access-point-alias" } @@ -5783,6 +5788,42 @@ "location":"header", "locationName":"Range" }, + "ResponseCacheControl":{ + "shape":"ResponseCacheControl", + "documentation":"

Sets the Cache-Control header of the response.

", + "location":"querystring", + "locationName":"response-cache-control" + }, + "ResponseContentDisposition":{ + "shape":"ResponseContentDisposition", + "documentation":"

Sets the Content-Disposition header of the response.

", + "location":"querystring", + "locationName":"response-content-disposition" + }, + "ResponseContentEncoding":{ + "shape":"ResponseContentEncoding", + "documentation":"

Sets the Content-Encoding header of the response.

", + "location":"querystring", + "locationName":"response-content-encoding" + }, + "ResponseContentLanguage":{ + "shape":"ResponseContentLanguage", + "documentation":"

Sets the Content-Language header of the response.

", + "location":"querystring", + "locationName":"response-content-language" + }, + "ResponseContentType":{ + "shape":"ResponseContentType", + "documentation":"

Sets the Content-Type header of the response.

", + "location":"querystring", + "locationName":"response-content-type" + }, + "ResponseExpires":{ + "shape":"ResponseExpires", + "documentation":"

Sets the Expires header of the response.

", + "location":"querystring", + "locationName":"response-expires" + }, "VersionId":{ "shape":"ObjectVersionId", "documentation":"

Version ID used to reference a specific version of the object.

For directory buckets in this API operation, only the null value of the version ID is supported.

", @@ -5846,7 +5887,7 @@ "members":{ "Suffix":{ "shape":"Suffix", - "documentation":"

A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" + "documentation":"

A suffix that is appended to a request that is for a directory on the website endpoint. (For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html.) The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "documentation":"

Container for the Suffix element.

" @@ -6550,6 +6591,27 @@ "Owner":{ "shape":"Owner", "documentation":"

The owner of the buckets listed.

" + }, + "ContinuationToken":{ + "shape":"NextToken", + "documentation":"

ContinuationToken is included in the response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket.

" + } + } + }, + "ListBucketsRequest":{ + "type":"structure", + "members":{ + "MaxBuckets":{ + "shape":"MaxBuckets", + "documentation":"

Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response.

", + "location":"querystring", + "locationName":"max-buckets" + }, + "ContinuationToken":{ + "shape":"Token", + "documentation":"

ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results.

Length Constraints: Minimum length of 0. Maximum length of 1024.

Required: No.

", + "location":"querystring", + "locationName":"continuation-token" } } }, @@ -6571,7 +6633,7 @@ "members":{ "ContinuationToken":{ "shape":"DirectoryBucketToken", - "documentation":"

ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results.

", + "documentation":"

ContinuationToken indicates to Amazon S3 that the list is being continued on buckets in this account with a token. ContinuationToken is obfuscated and is not a real bucket name. You can use this ContinuationToken for the pagination of the list results.

", "location":"querystring", "locationName":"continuation-token" }, @@ -6873,7 +6935,7 @@ }, "EncodingType":{ "shape":"EncodingType", - "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.

" + "documentation":"

Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.

When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png will appear as test_file%283%29.png.

" }, "RequestCharged":{ "shape":"RequestCharged", @@ -7020,7 +7082,7 @@ }, "EncodingType":{ "shape":"EncodingType", - "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.

", + "documentation":"

Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.

When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png will appear as test_file%283%29.png.

", "location":"querystring", "locationName":"encoding-type" }, @@ -7284,6 +7346,12 @@ "type":"integer", "box":true }, + "MaxBuckets":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, "MaxDirectoryBuckets":{ "type":"integer", "box":true, @@ -7494,7 +7562,7 @@ }, "NewerNoncurrentVersions":{ "shape":"VersionCount", - "documentation":"

Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

" + "documentation":"

Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

" } }, "documentation":"

Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.

" @@ -7512,7 +7580,7 @@ }, "NewerNoncurrentVersions":{ "shape":"VersionCount", - "documentation":"

Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

" + "documentation":"

Specifies how many noncurrent versions Amazon S3 will retain in the same storage class before transitioning objects. You can specify up to 100 noncurrent versions to retain. Amazon S3 will transition any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

" } }, "documentation":"

Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime.

" @@ -8025,7 +8093,7 @@ "members":{ "PartitionDateSource":{ "shape":"PartitionDateSource", - "documentation":"

Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.

" + "documentation":"

Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.

For DeliveryTime, the time in the log file names corresponds to the delivery time for the log files.

For EventTime, the logs delivered are for a specific day only. The year, month, and day correspond to the day on which the event occurred, and the hour, minutes, and seconds are set to 00 in the key.

" } }, "documentation":"

Amazon S3 keys for log objects are partitioned in the following format:

[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]

PartitionedPrefix defaults to EventTime delivery when server access logs are delivered.

", @@ -8136,7 +8204,7 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

", + "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

", "locationName":"RestrictPublicBuckets" } }, @@ -9757,7 +9825,7 @@ "members":{ "Payload":{ "shape":"Body", - "documentation":"

The byte array of partial, one or more result records.

", + "documentation":"

The byte array of partial, one or more result records. S3 Select doesn't guarantee that a record will be self-contained in one record frame. To ensure continuous streaming of data, S3 Select might split the same record across multiple record frames instead of aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream out of the response by default. Other clients might not handle this behavior by default. In those cases, you must aggregate the results on the client side and parse the response.

", "eventpayload":true } }, @@ -10464,7 +10532,7 @@ "documentation":"

Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse.

You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Key Alias: alias/alias-name

If you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.

If you are using encryption with cross-account or Amazon Web Services service operations you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.

Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

" } }, - "documentation":"

Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.

" + "documentation":"

Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.

If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.

" }, "ServerSideEncryptionConfiguration":{ "type":"structure", @@ -10490,7 +10558,7 @@ "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

" } }, - "documentation":"

Specifies the default server-side encryption configuration.

" + "documentation":"

Specifies the default server-side encryption configuration.

If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.

" }, "ServerSideEncryptionRules":{ "type":"list", diff --git a/botocore/data/sagemaker/2017-07-24/paginators-1.json b/botocore/data/sagemaker/2017-07-24/paginators-1.json index 95b87ccd71..d12fd9c57d 100644 --- a/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ b/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -431,6 +431,18 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "InferenceComponents" + }, + "ListMlflowTrackingServers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TrackingServerSummaries" + }, + "ListOptimizationJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "OptimizationJobSummaries" } } } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index bf535208d3..9b5c96fb65 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -12,7 +12,8 @@ "signatureVersion":"v4", "signingName":"sagemaker", "targetPrefix":"SageMaker", - "uid":"sagemaker-2017-07-24" + "uid":"sagemaker-2017-07-24", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddAssociation":{ @@ -138,7 +139,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" + "documentation":"

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment.

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide.

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" }, "CreateAutoMLJobV2":{ "name":"CreateAutoMLJobV2", @@ -152,7 +153,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" + "documentation":"

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment.

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide.

AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation.

CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" }, "CreateCluster":{ "name":"CreateCluster", @@ -362,7 +363,22 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Create a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Create a hub.

" + }, + "CreateHubContentReference":{ + "name":"CreateHubContentReference", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHubContentReferenceRequest"}, + "output":{"shape":"CreateHubContentReferenceResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Create a hub content reference in order to add a model in the JumpStart public hub to a private hub.

" }, "CreateHumanTaskUi":{ "name":"CreateHumanTaskUi", @@ -476,6 +492,19 @@ ], "documentation":"

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the Amazon Web Services Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

You can use this operation to create a static labeling job or a streaming labeling job. A static labeling job stops if all data objects in the input manifest file identified in ManifestS3Uri have been labeled. A streaming labeling job runs perpetually until it is manually stopped, or remains idle for 10 days. You can send new data objects to an active (InProgress) streaming labeling job in real time. To learn how to create a static labeling job, see Create a Labeling Job (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming labeling job, see Create a Streaming Labeling Job.

" }, + "CreateMlflowTrackingServer":{ + "name":"CreateMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMlflowTrackingServerRequest"}, + "output":{"shape":"CreateMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact store. For more information, see Create an MLflow Tracking Server.

" + }, "CreateModel":{ "name":"CreateModel", "http":{ @@ -627,6 +656,20 @@ ], "documentation":"

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, + "CreateOptimizationJob":{ + "name":"CreateOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptimizationJobRequest"}, + "output":{"shape":"CreateOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.

For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.

" + }, "CreatePipeline":{ "name":"CreatePipeline", "http":{ @@ -655,6 +698,19 @@ ], "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.

" }, + "CreatePresignedMlflowTrackingServerUrl":{ + "name":"CreatePresignedMlflowTrackingServerUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePresignedMlflowTrackingServerUrlRequest"}, + "output":{"shape":"CreatePresignedMlflowTrackingServerUrlResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns a presigned URL that you can use to connect to the MLflow UI attached to your tracking server. For more information, see Launch the MLflow UI using a presigned URL.

" + }, "CreatePresignedNotebookInstanceUrl":{ "name":"CreatePresignedNotebookInstanceUrl", "http":{ @@ -705,7 +761,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a space used for real time collaboration in a domain.

" + "documentation":"

Creates a private space or a space used for real time collaboration in a domain.

" }, "CreateStudioLifecycleConfig":{ "name":"CreateStudioLifecycleConfig", @@ -1068,7 +1124,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Delete a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Delete a hub.

" }, "DeleteHubContent":{ "name":"DeleteHubContent", @@ -1081,7 +1137,19 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Delete the contents of a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Delete the contents of a hub.

" + }, + "DeleteHubContentReference":{ + "name":"DeleteHubContentReference", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHubContentReferenceRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Delete a hub content reference in order to remove a model from a private hub.

" }, "DeleteHumanTaskUi":{ "name":"DeleteHumanTaskUi", @@ -1156,6 +1224,19 @@ ], "documentation":"

Deletes an inference experiment.

This operation does not delete your endpoint, variants, or any underlying resources. This operation only deletes the metadata of your experiment.

" }, + "DeleteMlflowTrackingServer":{ + "name":"DeleteMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMlflowTrackingServerRequest"}, + "output":{"shape":"DeleteMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an MLflow Tracking Server. For more information, see Clean up MLflow resources.

" + }, "DeleteModel":{ "name":"DeleteModel", "http":{ @@ -1277,6 +1358,18 @@ "input":{"shape":"DeleteNotebookInstanceLifecycleConfigInput"}, "documentation":"

Deletes a notebook instance lifecycle configuration.

" }, + "DeleteOptimizationJob":{ + "name":"DeleteOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an optimization job.

" + }, "DeletePipeline":{ "name":"DeletePipeline", "http":{ @@ -1386,7 +1479,7 @@ }, "input":{"shape":"DeleteWorkforceRequest"}, "output":{"shape":"DeleteWorkforceResponse"}, - "documentation":"

Use this operation to delete a workforce.

If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will recieve a ResourceInUse error.

" + "documentation":"

Use this operation to delete a workforce.

If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.

" }, "DeleteWorkteam":{ "name":"DeleteWorkteam", @@ -1522,7 +1615,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Retrieves information of an instance (also called a node interchangeably) of a SageMaker HyperPod cluster.

" + "documentation":"

Retrieves information of a node (also called an instance interchangeably) of a SageMaker HyperPod cluster.

" }, "DescribeCodeRepository":{ "name":"DescribeCodeRepository", @@ -1721,7 +1814,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Describe a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Describes a hub.

" }, "DescribeHubContent":{ "name":"DescribeHubContent", @@ -1734,7 +1827,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Describe the content of a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Describe the content of a hub.

" }, "DescribeHumanTaskUi":{ "name":"DescribeHumanTaskUi", @@ -1850,6 +1943,19 @@ ], "documentation":"

Provides a list of properties for the requested lineage group. For more information, see Cross-Account Lineage Tracking in the Amazon SageMaker Developer Guide.

" }, + "DescribeMlflowTrackingServer":{ + "name":"DescribeMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMlflowTrackingServerRequest"}, + "output":{"shape":"DescribeMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns information about an MLflow Tracking Server.

" + }, "DescribeModel":{ "name":"DescribeModel", "http":{ @@ -1920,7 +2026,7 @@ }, "input":{"shape":"DescribeModelPackageInput"}, "output":{"shape":"DescribeModelPackageOutput"}, - "documentation":"

Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.

" + "documentation":"

Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.

If you provided a KMS Key ID when you created your model package, you will see the KMS Decrypt API call in your CloudTrail logs when you use this API.

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.

" }, "DescribeModelPackageGroup":{ "name":"DescribeModelPackageGroup", @@ -1978,6 +2084,19 @@ "output":{"shape":"DescribeNotebookInstanceLifecycleConfigOutput"}, "documentation":"

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, + "DescribeOptimizationJob":{ + "name":"DescribeOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptimizationJobRequest"}, + "output":{"shape":"DescribeOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Provides the properties of the specified optimization job.

" + }, "DescribePipeline":{ "name":"DescribePipeline", "http":{ @@ -2160,7 +2279,7 @@ }, "input":{"shape":"DescribeWorkteamRequest"}, "output":{"shape":"DescribeWorkteamResponse"}, - "documentation":"

Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).

" + "documentation":"

Gets information about a specific work team. You can see information such as the creation date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).

" }, "DisableSagemakerServicecatalogPortfolio":{ "name":"DisableSagemakerServicecatalogPortfolio", @@ -2274,7 +2393,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Import hub content.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Import hub content.

" }, "ListActions":{ "name":"ListActions", @@ -2558,7 +2677,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

List hub content versions.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

List hub content versions.

" }, "ListHubContents":{ "name":"ListHubContents", @@ -2571,7 +2690,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

List the contents of a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

List the contents of a hub.

" }, "ListHubs":{ "name":"ListHubs", @@ -2581,7 +2700,7 @@ }, "input":{"shape":"ListHubsRequest"}, "output":{"shape":"ListHubsResponse"}, - "documentation":"

List all existing hubs.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

List all existing hubs.

" }, "ListHumanTaskUis":{ "name":"ListHumanTaskUis", @@ -2702,6 +2821,16 @@ "output":{"shape":"ListLineageGroupsResponse"}, "documentation":"

A list of lineage groups shared with your Amazon Web Services account. For more information, see Cross-Account Lineage Tracking in the Amazon SageMaker Developer Guide.

" }, + "ListMlflowTrackingServers":{ + "name":"ListMlflowTrackingServers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMlflowTrackingServersRequest"}, + "output":{"shape":"ListMlflowTrackingServersResponse"}, + "documentation":"

Lists all MLflow Tracking Servers.

" + }, "ListModelBiasJobDefinitions":{ "name":"ListModelBiasJobDefinitions", "http":{ @@ -2871,6 +3000,16 @@ "output":{"shape":"ListNotebookInstancesOutput"}, "documentation":"

Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region.

" }, + "ListOptimizationJobs":{ + "name":"ListOptimizationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOptimizationJobsRequest"}, + "output":{"shape":"ListOptimizationJobsResponse"}, + "documentation":"

Lists the optimization jobs in your account and their properties.

" + }, "ListPipelineExecutionSteps":{ "name":"ListPipelineExecutionSteps", "http":{ @@ -3221,6 +3360,20 @@ ], "documentation":"

Starts an inference experiment.

" }, + "StartMlflowTrackingServer":{ + "name":"StartMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMlflowTrackingServerRequest"}, + "output":{"shape":"StartMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

Programmatically start an MLflow Tracking Server.

" + }, "StartMonitoringSchedule":{ "name":"StartMonitoringSchedule", "http":{ @@ -3352,6 +3505,20 @@ ], "documentation":"

Stops a running labeling job. A job that is stopped cannot be restarted. Any results obtained before the job is stopped are placed in the Amazon S3 output bucket.

" }, + "StopMlflowTrackingServer":{ + "name":"StopMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopMlflowTrackingServerRequest"}, + "output":{"shape":"StopMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

Programmatically stop an MLflow Tracking Server.

" + }, "StopMonitoringSchedule":{ "name":"StopMonitoringSchedule", "http":{ @@ -3373,6 +3540,18 @@ "input":{"shape":"StopNotebookInstanceInput"}, "documentation":"

Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

" }, + "StopOptimizationJob":{ + "name":"StopOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Ends a running inference optimization job.

" + }, "StopPipelineExecution":{ "name":"StopPipelineExecution", "http":{ @@ -3633,7 +3812,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Update a hub.

Hub APIs are only callable through SageMaker Studio.

" + "documentation":"

Update a hub.

" }, "UpdateImage":{ "name":"UpdateImage", @@ -3703,6 +3882,21 @@ ], "documentation":"

Updates an inference experiment that you created. The status of the inference experiment has to be either Created, Running. For more information on the status of an inference experiment, see DescribeInferenceExperiment.

" }, + "UpdateMlflowTrackingServer":{ + "name":"UpdateMlflowTrackingServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMlflowTrackingServerRequest"}, + "output":{"shape":"UpdateMlflowTrackingServerResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates properties of an existing MLflow Tracking Server.

" + }, "UpdateModelCard":{ "name":"UpdateModelCard", "http":{ @@ -4120,6 +4314,33 @@ "max":15, "min":1 }, + "AdditionalModelChannelName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\.\\-_]+" + }, + "AdditionalModelDataSource":{ + "type":"structure", + "required":[ + "ChannelName", + "S3DataSource" + ], + "members":{ + "ChannelName":{ + "shape":"AdditionalModelChannelName", + "documentation":"

A custom name for this AdditionalModelDataSource object.

" + }, + "S3DataSource":{"shape":"S3ModelDataSource"} + }, + "documentation":"

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + }, + "AdditionalModelDataSources":{ + "type":"list", + "member":{"shape":"AdditionalModelDataSource"}, + "max":5, + "min":0 + }, "AdditionalS3DataSource":{ "type":"structure", "required":[ @@ -4396,6 +4617,20 @@ }, "documentation":"

Specifies configurations for one or more training jobs that SageMaker runs to test the algorithm.

" }, + "AmazonQSettings":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"FeatureStatus", + "documentation":"

Whether Amazon Q has been enabled within the domain.

" + }, + "QProfileArn":{ + "shape":"QProfileArn", + "documentation":"

The ARN of the Amazon Q profile used within the domain.

" + } + }, + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain.

" + }, "AnnotationConsolidationConfig":{ "type":"structure", "required":["AnnotationConsolidationLambdaArn"], @@ -4938,6 +5173,11 @@ }, "documentation":"

Lists a summary of the properties of an association. An association is an entity that links other lineage or experiment entities. An example would be an association between a training job and a model.

" }, + "AssumableRoleArns":{ + "type":"list", + "member":{"shape":"RoleArn"}, + "max":5 + }, "AsyncInferenceClientConfig":{ "type":"structure", "members":{ @@ -5109,6 +5349,22 @@ "IAM" ] }, + "AuthenticationRequestExtraParams":{ + "type":"map", + "key":{"shape":"AuthenticationRequestExtraParamsKey"}, + "value":{"shape":"AuthenticationRequestExtraParamsValue"}, + "max":10 + }, + "AuthenticationRequestExtraParamsKey":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "AuthenticationRequestExtraParamsValue":{ + "type":"string", + "max":512, + "pattern":".*" + }, "AutoGenerateEndpointName":{"type":"boolean"}, "AutoMLAlgorithm":{ "type":"string", @@ -5121,7 +5377,13 @@ "randomforest", "extra-trees", "nn-torch", - "fastai" + "fastai", + "cnn-qr", + "deepar", + "prophet", + "npts", + "arima", + "ets" ] }, "AutoMLAlgorithmConfig":{ @@ -5130,10 +5392,10 @@ "members":{ "AutoMLAlgorithms":{ "shape":"AutoMLAlgorithms", - "documentation":"

The selection of algorithms run on a dataset to train the model candidates of an Autopilot job.

Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

  • In ENSEMBLING mode:

    • \"catboost\"

    • \"extra-trees\"

    • \"fastai\"

    • \"lightgbm\"

    • \"linear-learner\"

    • \"nn-torch\"

    • \"randomforest\"

    • \"xgboost\"

  • In HYPERPARAMETER_TUNING mode:

    • \"linear-learner\"

    • \"mlp\"

    • \"xgboost\"

" + "documentation":"

The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job.

  • For the tabular problem type TabularJobConfig:

    Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

    • In ENSEMBLING mode:

      • \"catboost\"

      • \"extra-trees\"

      • \"fastai\"

      • \"lightgbm\"

      • \"linear-learner\"

      • \"nn-torch\"

      • \"randomforest\"

      • \"xgboost\"

    • In HYPERPARAMETER_TUNING mode:

      • \"linear-learner\"

      • \"mlp\"

      • \"xgboost\"

  • For the time-series forecasting problem type TimeSeriesForecastingJobConfig:

    • Choose your algorithms from this list.

      • \"cnn-qr\"

      • \"deepar\"

      • \"prophet\"

      • \"arima\"

      • \"npts\"

      • \"ets\"

" } }, - "documentation":"

The collection of algorithms run on a dataset for training the model candidates of an Autopilot job.

" + "documentation":"

The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job.

" }, "AutoMLAlgorithms":{ "type":"list", @@ -5213,7 +5475,7 @@ }, "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"

Stores the configuration information for the selection of algorithms used to train the model candidates.

The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode .

  • AlgorithmsConfig should not be set in AUTO training mode.

  • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

    If the list of algorithms provided as values for AutoMLAlgorithms is empty, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode.

  • When AlgorithmsConfig is not provided, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode.

For the list of all algorithms per training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "documentation":"

Stores the configuration information for the selection of algorithms trained on tabular data.

The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode .

  • AlgorithmsConfig should not be set if the training mode is set on AUTO.

  • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

    If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

  • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" } }, "documentation":"

Stores the configuration information for how a candidate is generated (optional).

" @@ -5283,6 +5545,16 @@ "validation" ] }, + "AutoMLComputeConfig":{ + "type":"structure", + "members":{ + "EmrServerlessComputeConfig":{ + "shape":"EmrServerlessComputeConfig", + "documentation":"

The configuration for using EMR Serverless to run the AutoML job V2.

To allow your AutoML job V2 to automatically initiate a remote job on EMR Serverless when additional compute resources are needed to process large datasets, you need to provide an EmrServerlessComputeConfig object, which includes an ExecutionRoleARN attribute, to the AutoMLComputeConfig of the AutoML job V2 input request.

By seamlessly transitioning to EMR Serverless when required, the AutoML job can handle datasets that would otherwise exceed the initially provisioned resources, without any manual intervention from you.

EMR Serverless is available for the tabular and time series problem types. We recommend setting up this option for tabular datasets larger than 5 GB and time series datasets larger than 30 GB.

" + } + }, + "documentation":"

This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

Specifies the compute configuration for an AutoML job V2.

" + }, "AutoMLContainerDefinition":{ "type":"structure", "required":[ @@ -5654,7 +5926,7 @@ }, "S3OutputPath":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 output path. Must be 128 characters or less.

" + "documentation":"

The Amazon S3 output path. Must be 512 characters or less.

" } }, "documentation":"

The output data configuration.

" @@ -6181,7 +6453,7 @@ "members":{ "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"

Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.

The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode .

  • AlgorithmsConfig should not be set in AUTO training mode.

  • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

    If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

  • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "documentation":"

Your Autopilot job trains a default set of algorithms on your dataset. For tabular and time-series data, you can customize the algorithm list by selecting a subset of algorithms for your problem type.

AlgorithmsConfig stores the customized selection of algorithms to train on your data.

  • For the tabular problem type TabularJobConfig, the list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode .

    • AlgorithmsConfig should not be set when the training mode AutoMLJobConfig.Mode is set to AUTO.

    • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

      If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

    • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

    For the list of all algorithms per training mode, see AlgorithmConfig.

    For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

  • For the time-series forecasting problem type TimeSeriesForecastingJobConfig, choose your algorithms from the list provided in AlgorithmConfig.

    For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.

    • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

      If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting.

    • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting.

" } }, "documentation":"

Stores the configuration information for how model candidates are generated using an AutoML job V2.

" @@ -6276,6 +6548,10 @@ "GenerativeAiSettings":{ "shape":"GenerativeAiSettings", "documentation":"

The generative AI settings for the SageMaker Canvas application.

" + }, + "EmrServerlessSettings":{ + "shape":"EmrServerlessSettings", + "documentation":"

The settings for running Amazon EMR Serverless data processing jobs in SageMaker Canvas.

" } }, "documentation":"

The SageMaker Canvas application settings.

" @@ -6910,6 +7186,30 @@ "max":256, "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12}$" }, + "ClusterAvailabilityZone":{ + "type":"string", + "pattern":"^[a-z]{2}-[a-z]+-\\d[a-z]$" + }, + "ClusterAvailabilityZoneId":{ + "type":"string", + "pattern":"^[a-z]{3}\\d-az\\d$" + }, + "ClusterEbsVolumeConfig":{ + "type":"structure", + "required":["VolumeSizeInGB"], + "members":{ + "VolumeSizeInGB":{ + "shape":"ClusterEbsVolumeSizeInGB", + "documentation":"

The size in gigabytes (GB) of the additional EBS volume to be attached to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to /opt/sagemaker.

" + } + }, + "documentation":"

Defines the configuration for attaching an additional Amazon Elastic Block Store (EBS) volume to each instance of the SageMaker HyperPod cluster instance group. To learn more, see SageMaker HyperPod release notes: June 20, 2024.

" + }, + "ClusterEbsVolumeSizeInGB":{ + "type":"integer", + "max":16384, + "min":1 + }, "ClusterInstanceCount":{ "type":"integer", "min":1 @@ -6944,6 +7244,10 @@ "ThreadsPerCore":{ "shape":"ClusterThreadsPerCore", "documentation":"

The number you specified to ThreadsPerCore in CreateCluster for enabling or disabling multithreading. For instance types that support multithreading, you can specify 1 for disabling multithreading and 2 for enabling multithreading. For more information, see the reference table of CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud User Guide.

" + }, + "InstanceStorageConfigs":{ + "shape":"ClusterInstanceStorageConfigs", + "documentation":"

The additional storage configurations for the instances in the SageMaker HyperPod cluster instance group.

" } }, "documentation":"

Details of an instance group in a SageMaker HyperPod cluster.

" @@ -6991,6 +7295,10 @@ "ThreadsPerCore":{ "shape":"ClusterThreadsPerCore", "documentation":"

Specifies the value for Threads per core. For instance types that support multithreading, you can specify 1 for disabling multithreading and 2 for enabling multithreading. For instance types that don't support multithreading, specify 1. For more information, see the reference table of CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud User Guide.

" + }, + "InstanceStorageConfigs":{ + "shape":"ClusterInstanceStorageConfigs", + "documentation":"

Specifies the additional storage configurations for the instances in the SageMaker HyperPod cluster instance group.

" } }, "documentation":"

The specifications of an instance group that you need to define.

" @@ -6998,9 +7306,23 @@ "ClusterInstanceGroupSpecifications":{ "type":"list", "member":{"shape":"ClusterInstanceGroupSpecification"}, - "max":5, + "max":20, "min":1 }, + "ClusterInstancePlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"ClusterAvailabilityZone", + "documentation":"

The Availability Zone where the node in the SageMaker HyperPod cluster is launched.

" + }, + "AvailabilityZoneId":{ + "shape":"ClusterAvailabilityZoneId", + "documentation":"

The unique identifier (ID) of the Availability Zone where the node in the SageMaker HyperPod cluster is launched.

" + } + }, + "documentation":"

Specifies the placement details for the node in the SageMaker HyperPod cluster, including the Availability Zone and the unique identifier (ID) of the Availability Zone.

" + }, "ClusterInstanceStatus":{ "type":"string", "enum":[ @@ -7026,6 +7348,22 @@ }, "documentation":"

Details of an instance in a SageMaker HyperPod cluster.

" }, + "ClusterInstanceStorageConfig":{ + "type":"structure", + "members":{ + "EbsVolumeConfig":{ + "shape":"ClusterEbsVolumeConfig", + "documentation":"

Defines the configuration for attaching additional Amazon Elastic Block Store (EBS) volumes to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to /opt/sagemaker.

" + } + }, + "documentation":"

Defines the configuration for attaching additional storage to the instances in the SageMaker HyperPod cluster instance group. To learn more, see SageMaker HyperPod release notes: June 20, 2024.

", + "union":true + }, + "ClusterInstanceStorageConfigs":{ + "type":"list", + "member":{"shape":"ClusterInstanceStorageConfig"}, + "max":1 + }, "ClusterInstanceType":{ "type":"string", "enum":[ @@ -7134,6 +7472,22 @@ "ThreadsPerCore":{ "shape":"ClusterThreadsPerCore", "documentation":"

The number of threads per CPU core you specified under CreateCluster.

" + }, + "InstanceStorageConfigs":{ + "shape":"ClusterInstanceStorageConfigs", + "documentation":"

The configurations of additional storage specified to the instance group where the instance (node) is launched.

" + }, + "PrivatePrimaryIp":{ + "shape":"ClusterPrivatePrimaryIp", + "documentation":"

The private primary IP address of the SageMaker HyperPod cluster node.

" + }, + "PrivateDnsHostname":{ + "shape":"ClusterPrivateDnsHostname", + "documentation":"

The private DNS hostname of the SageMaker HyperPod cluster node.

" + }, + "Placement":{ + "shape":"ClusterInstancePlacement", + "documentation":"

The placement details of the SageMaker HyperPod cluster node.

" } }, "documentation":"

Details of an instance (also called a node interchangeably) in a SageMaker HyperPod cluster.

" @@ -7142,7 +7496,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z][-a-zA-Z0-9]*$" + "pattern":"^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" }, "ClusterNodeSummaries":{ "type":"list", @@ -7185,6 +7539,14 @@ "type":"integer", "min":0 }, + "ClusterPrivateDnsHostname":{ + "type":"string", + "pattern":"^ip-((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)-?\\b){4}\\..*$" + }, + "ClusterPrivatePrimaryIp":{ + "type":"string", + "pattern":"^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" + }, "ClusterSortBy":{ "type":"string", "enum":[ @@ -7650,6 +8012,10 @@ "shape":"ModelDataSource", "documentation":"

Specifies the location of ML model data to deploy.

Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker Marketplace.

" }, + "AdditionalModelDataSources":{ + "shape":"AdditionalModelDataSources", + "documentation":"

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + }, "Environment":{ "shape":"EnvironmentMap", "documentation":"

The environment variables to set in the Docker container.

The maximum length of each key and value in the Environment map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a CreateModel request, then the maximum length of all of their maps, combined, is also 32 KB.

" @@ -8196,6 +8562,10 @@ "DataSplitConfig":{ "shape":"AutoMLDataSplitConfig", "documentation":"

This structure specifies how to split the data into train and validation datasets.

The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

This attribute must not be set for the time-series forecasting problem type, as Autopilot automatically splits the input dataset into training and validation sets.

" + }, + "AutoMLComputeConfig":{ + "shape":"AutoMLComputeConfig", + "documentation":"

Specifies the compute configuration for the AutoML job V2.

" } } }, @@ -8858,6 +9228,52 @@ } } }, + "CreateHubContentReferenceRequest":{ + "type":"structure", + "required":[ + "HubName", + "SageMakerPublicHubContentArn" + ], + "members":{ + "HubName":{ + "shape":"HubNameOrArn", + "documentation":"

The name of the hub to add the hub content reference to.

" + }, + "SageMakerPublicHubContentArn":{ + "shape":"SageMakerPublicHubContentArn", + "documentation":"

The ARN of the public hub content to reference.

" + }, + "HubContentName":{ + "shape":"HubContentName", + "documentation":"

The name of the hub content to reference.

" + }, + "MinVersion":{ + "shape":"HubContentVersion", + "documentation":"

The minimum version of the hub content to reference.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the hub content to reference.

" + } + } + }, + "CreateHubContentReferenceResponse":{ + "type":"structure", + "required":[ + "HubArn", + "HubContentArn" + ], + "members":{ + "HubArn":{ + "shape":"HubArn", + "documentation":"

The ARN of the hub that the hub content reference was added to.

" + }, + "HubContentArn":{ + "shape":"HubContentArn", + "documentation":"

The ARN of the hub content.

" + } + } + }, "CreateHubRequest":{ "type":"structure", "required":[ @@ -9307,6 +9723,57 @@ } } }, + "CreateMlflowTrackingServerRequest":{ + "type":"structure", + "required":[ + "TrackingServerName", + "ArtifactStoreUri", + "RoleArn" + ], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

A unique string identifying the tracking server name. This string is part of the tracking server ARN.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI for a general purpose bucket to use as the MLflow Tracking Server artifact store.

" + }, + "TrackingServerSize":{ + "shape":"TrackingServerSize", + "documentation":"

The size of the tracking server you want to create. You can choose between \"Small\", \"Medium\", and \"Large\". The default MLflow Tracking Server configuration size is \"Small\". You can choose a size depending on the projected use of the tracking server such as the volume of data logged, number of users, and frequency of use.

We recommend using a small tracking server for teams of up to 25 users, a medium tracking server for teams of up to 50 users, and a large tracking server for teams of up to 100 users.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

The version of MLflow that the tracking server uses. To see which MLflow versions are available to use, see How it works.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow Tracking Server uses to access the artifact store in Amazon S3. The role should have AmazonS3FullAccess permissions. For more information on IAM permissions for tracking server creation, see Set up IAM permissions for MLflow.

" + }, + "AutomaticModelRegistration":{ + "shape":"Boolean", + "documentation":"

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags consisting of key-value pairs used to manage metadata for the tracking server.

" + } + } + }, + "CreateMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of the tracking server.

" + } + } + }, "CreateModelBiasJobDefinitionRequest":{ "type":"structure", "required":[ @@ -9647,6 +10114,14 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package. If you want to clone a model package, set it to the model package Amazon Resource Name (ARN). If you want to register a model, set it to the model ARN.

" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version.

" } } }, @@ -9855,6 +10330,67 @@ } } }, + "CreateOptimizationJobRequest":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "RoleArn", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "StoppingCondition" + ], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

A custom name for the new optimization job.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model optimization, Amazon SageMaker needs your permission to:

  • Read input data from an S3 bucket

  • Write model artifacts to an S3 bucket

  • Write logs to Amazon CloudWatch Logs

  • Publish metrics to Amazon CloudWatch

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.

" + }, + "ModelSource":{ + "shape":"OptimizationJobModelSource", + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

The environment variables to set in the model container.

" + }, + "OptimizationConfigs":{ + "shape":"OptimizationConfigs", + "documentation":"

Settings for each of the optimization techniques that the job applies.

" + }, + "OutputConfig":{ + "shape":"OptimizationJobOutputConfig", + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "StoppingCondition":{"shape":"StoppingCondition"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

" + }, + "VpcConfig":{ + "shape":"OptimizationVpcConfig", + "documentation":"

A VPC in Amazon VPC that your optimized model has access to.

" + } + } + }, + "CreateOptimizationJobResponse":{ + "type":"structure", + "required":["OptimizationJobArn"], + "members":{ + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + } + } + }, "CreatePipelineRequest":{ "type":"structure", "required":[ @@ -9953,6 +10489,33 @@ } } }, + "CreatePresignedMlflowTrackingServerUrlRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the tracking server to connect to your MLflow UI.

" + }, + "ExpiresInSeconds":{ + "shape":"ExpiresInSeconds", + "documentation":"

The duration in seconds that your presigned URL is valid. The presigned URL can be used only once.

" + }, + "SessionExpirationDurationInSeconds":{ + "shape":"SessionExpirationDurationInSeconds", + "documentation":"

The duration in seconds that your MLflow UI session is valid.

" + } + } + }, + "CreatePresignedMlflowTrackingServerUrlResponse":{ + "type":"structure", + "members":{ + "AuthorizedUrl":{ + "shape":"TrackingServerUrl", + "documentation":"

A presigned URL with an authorization token.

" + } + } + }, "CreatePresignedNotebookInstanceUrlInput":{ "type":"structure", "required":["NotebookInstanceName"], @@ -10541,6 +11104,10 @@ "shape":"NotificationConfiguration", "documentation":"

Configures notification of workers regarding available or expiring work items.

" }, + "WorkerAccessConfiguration":{ + "shape":"WorkerAccessConfiguration", + "documentation":"

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" + }, "Tags":{ "shape":"TagList", "documentation":"

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

" @@ -10601,12 +11168,12 @@ "CustomFileSystemConfigs":{ "type":"list", "member":{"shape":"CustomFileSystemConfig"}, - "max":2 + "max":10 }, "CustomFileSystems":{ "type":"list", "member":{"shape":"CustomFileSystem"}, - "max":1 + "max":5 }, "CustomImage":{ "type":"structure", @@ -11403,6 +11970,28 @@ "members":{ } }, + "DeleteHubContentReferenceRequest":{ + "type":"structure", + "required":[ + "HubName", + "HubContentType", + "HubContentName" + ], + "members":{ + "HubName":{ + "shape":"HubNameOrArn", + "documentation":"

The name of the hub to delete the hub content reference from.

" + }, + "HubContentType":{ + "shape":"HubContentType", + "documentation":"

The type of hub content reference to delete. The only supported type of hub content reference to delete is ModelReference.

" + }, + "HubContentName":{ + "shape":"HubContentName", + "documentation":"

The name of the hub content to delete.

" + } + } + }, "DeleteHubContentRequest":{ "type":"structure", "required":[ @@ -11413,7 +12002,7 @@ ], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub that you want to delete content in.

" }, "HubContentType":{ @@ -11435,7 +12024,7 @@ "required":["HubName"], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to delete.

" } } @@ -11533,6 +12122,25 @@ } } }, + "DeleteMlflowTrackingServerRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the tracking server to delete.

" + } + } + }, + "DeleteMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

A TrackingServerArn object, the ARN of the tracking server that is deleted if successfully found.

" + } + } + }, "DeleteModelBiasJobDefinitionRequest":{ "type":"structure", "required":["JobDefinitionName"], @@ -11643,6 +12251,16 @@ } } }, + "DeleteOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, "DeletePipelineRequest":{ "type":"structure", "required":[ @@ -12492,6 +13110,10 @@ "SecurityConfig":{ "shape":"AutoMLSecurityConfig", "documentation":"

Returns the security configuration for traffic encryption or Amazon VPC settings.

" + }, + "AutoMLComputeConfig":{ + "shape":"AutoMLComputeConfig", + "documentation":"

The compute configuration used for the AutoML job V2.

" } } }, @@ -12504,11 +13126,11 @@ "members":{ "ClusterName":{ "shape":"ClusterNameOrArn", - "documentation":"

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the instance is.

" + "documentation":"

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the node is.

" }, "NodeId":{ "shape":"ClusterNodeId", - "documentation":"

The ID of the instance.

" + "documentation":"

The ID of the SageMaker HyperPod cluster node.

" } } }, @@ -12518,7 +13140,7 @@ "members":{ "NodeDetails":{ "shape":"ClusterNodeDetails", - "documentation":"

The details of the instance.

" + "documentation":"

The details of the SageMaker HyperPod cluster node.

" } } }, @@ -13629,7 +14251,7 @@ ], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub that contains the content to describe.

" }, "HubContentType":{ @@ -13705,6 +14327,18 @@ "shape":"HubContentDocument", "documentation":"

The hub content document that describes information about the hub content such as type, associated containers, scripts, and more.

" }, + "SageMakerPublicHubContentArn":{ + "shape":"SageMakerPublicHubContentArn", + "documentation":"

The ARN of the public hub content.

" + }, + "ReferenceMinVersion":{ + "shape":"ReferenceMinVersion", + "documentation":"

The minimum version of the hub content.

" + }, + "SupportStatus":{ + "shape":"HubContentSupportStatus", + "documentation":"

The support status of the hub content.

" + }, "HubContentSearchKeywords":{ "shape":"HubContentSearchKeywordList", "documentation":"

The searchable keywords for the hub content.

" @@ -13732,7 +14366,7 @@ "required":["HubName"], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to describe.

" } } @@ -14434,6 +15068,75 @@ "LastModifiedBy":{"shape":"UserContext"} } }, + "DescribeMlflowTrackingServerRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the MLflow Tracking Server to describe.

" + } + } + }, + "DescribeMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of the described tracking server.

" + }, + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the described tracking server.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI of the general purpose bucket used as the MLflow Tracking Server artifact store.

" + }, + "TrackingServerSize":{ + "shape":"TrackingServerSize", + "documentation":"

The size of the described tracking server.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

The MLflow version used for the described tracking server.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role in your account that the described MLflow Tracking Server uses to access the artifact store in Amazon S3.

" + }, + "TrackingServerStatus":{ + "shape":"TrackingServerStatus", + "documentation":"

The current creation status of the described MLflow Tracking Server.

" + }, + "IsActive":{ + "shape":"IsTrackingServerActive", + "documentation":"

Whether the described MLflow Tracking Server is currently active.

" + }, + "TrackingServerUrl":{ + "shape":"TrackingServerUrl", + "documentation":"

The URL to connect to the MLflow user interface for the described tracking server.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week when weekly maintenance occurs on the described tracking server.

" + }, + "AutomaticModelRegistration":{ + "shape":"Boolean", + "documentation":"

Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the described MLflow Tracking Server was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the described MLflow Tracking Server was last modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"} + } + }, "DescribeModelBiasJobDefinitionRequest":{ "type":"structure", "required":["JobDefinitionName"], @@ -14906,6 +15609,14 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version.

" } } }, @@ -15174,6 +15885,99 @@ } } }, + "DescribeOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, + "DescribeOptimizationJobResponse":{ + "type":"structure", + "required":[ + "OptimizationJobArn", + "OptimizationJobStatus", + "CreationTime", + "LastModifiedTime", + "OptimizationJobName", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "RoleArn", + "StoppingCondition" + ], + "members":{ + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + }, + "OptimizationJobStatus":{ + "shape":"OptimizationJobStatus", + "documentation":"

The current status of the optimization job.

" + }, + "OptimizationStartTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job started.

" + }, + "OptimizationEndTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job finished processing.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time when you created the optimization job.

" + }, + "LastModifiedTime":{ + "shape":"LastModifiedTime", + "documentation":"

The time when the optimization job was last updated.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the optimization job status is FAILED, the reason for the failure.

" + }, + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + }, + "ModelSource":{ + "shape":"OptimizationJobModelSource", + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "OptimizationEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

The environment variables to set in the model container.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationConfigs":{ + "shape":"OptimizationConfigs", + "documentation":"

Settings for each of the optimization techniques that the job applies.

" + }, + "OutputConfig":{ + "shape":"OptimizationJobOutputConfig", + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "OptimizationOutput":{ + "shape":"OptimizationOutput", + "documentation":"

Output values produced by an optimization job.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role that you assigned to the optimization job.

" + }, + "StoppingCondition":{"shape":"StoppingCondition"}, + "VpcConfig":{ + "shape":"OptimizationVpcConfig", + "documentation":"

A VPC in Amazon VPC that your optimized model has access to.

" + } + } + }, "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -16636,6 +17440,10 @@ "DockerSettings":{ "shape":"DockerSettings", "documentation":"

A collection of settings that configure the domain's Docker interaction.

" + }, + "AmazonQSettings":{ + "shape":"AmazonQSettings", + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain. The AuthMode that you use to create the domain must be SSO.

" } }, "documentation":"

A collection of settings that apply to the SageMaker Domain. These settings are specified through the CreateDomain API call.

" @@ -16658,6 +17466,10 @@ "DockerSettings":{ "shape":"DockerSettings", "documentation":"

A collection of settings that configure the domain's Docker interaction.

" + }, + "AmazonQSettings":{ + "shape":"AmazonQSettings", + "documentation":"

A collection of settings that configure the Amazon Q experience within the domain.

" } }, "documentation":"

A collection of Domain configuration settings to update.

" @@ -17221,6 +18033,45 @@ "max":10, "pattern":"\\d+" }, + "EmrServerlessComputeConfig":{ + "type":"structure", + "required":["ExecutionRoleARN"], + "members":{ + "ExecutionRoleARN":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role granting the AutoML job V2 the necessary permissions access policies to list, connect to, or manage EMR Serverless jobs. For detailed information about the required permissions of this role, see \"How to configure AutoML to initiate a remote job on EMR Serverless for large datasets\" in Create a regression or classification job for tabular data using the AutoML API or Create an AutoML job for time-series forecasting using the API.

" + } + }, + "documentation":"

This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

Specifies the compute configuration for the EMR Serverless job.

" + }, + "EmrServerlessSettings":{ + "type":"structure", + "members":{ + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services IAM role that is assumed for running Amazon EMR Serverless jobs in SageMaker Canvas. This role should have the necessary permissions to read and write data attached and a trust relationship with EMR Serverless.

" + }, + "Status":{ + "shape":"FeatureStatus", + "documentation":"

Describes whether Amazon EMR Serverless job capabilities are enabled or disabled in the SageMaker Canvas application.

" + } + }, + "documentation":"

The settings for running Amazon EMR Serverless jobs in SageMaker Canvas.

" + }, + "EmrSettings":{ + "type":"structure", + "members":{ + "AssumableRoleArns":{ + "shape":"AssumableRoleArns", + "documentation":"

An array of Amazon Resource Names (ARNs) of the IAM roles that the execution role of SageMaker can assume for performing operations or tasks related to Amazon EMR clusters or Amazon EMR Serverless applications. These roles define the permissions and access policies required when performing Amazon EMR-related operations, such as listing, connecting to, or terminating Amazon EMR clusters or Amazon EMR Serverless applications. They are typically used in cross-account access scenarios, where the Amazon EMR resources (clusters or serverless applications) are located in a different Amazon Web Services account than the SageMaker domain.

" + }, + "ExecutionRoleArns":{ + "shape":"ExecutionRoleArns", + "documentation":"

An array of Amazon Resource Names (ARNs) of the IAM roles used by the Amazon EMR cluster instances or job execution environments to access other Amazon Web Services services and resources needed during the runtime of your Amazon EMR or Amazon EMR Serverless workloads, such as Amazon S3 for data access, Amazon CloudWatch for logging, or other Amazon Web Services services based on the particular workload requirements.

" + } + }, + "documentation":"

The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.

" + }, "EnableCapture":{"type":"boolean"}, "EnableInfraCheck":{"type":"boolean"}, "EnableIotRoleAlias":{"type":"boolean"}, @@ -17236,6 +18087,13 @@ } }, "EnableSessionTagChaining":{"type":"boolean"}, + "EnabledOrDisabled":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "Endpoint":{ "type":"structure", "required":[ @@ -17324,6 +18182,16 @@ "CreationTime" ] }, + "EndpointConfigStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EndpointConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint configuration used in the step.

" + } + }, + "documentation":"

Metadata for an endpoint configuration step.

" + }, "EndpointConfigSummary":{ "type":"structure", "required":[ @@ -17542,6 +18410,16 @@ "UpdateRollbackFailed" ] }, + "EndpointStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EndpointArn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint in the step.

" + } + }, + "documentation":"

Metadata for an endpoint step.

" + }, "EndpointSummary":{ "type":"structure", "required":[ @@ -17654,6 +18532,11 @@ "type":"string", "max":100 }, + "ExecutionRoleArns":{ + "type":"list", + "member":{"shape":"RoleArn"}, + "max":5 + }, "ExecutionRoleIdentityConfig":{ "type":"string", "enum":[ @@ -18790,6 +19673,14 @@ "max":10, "min":1 }, + "HiddenAppTypesList":{ + "type":"list", + "member":{"shape":"AppType"} + }, + "HiddenMlToolsList":{ + "type":"list", + "member":{"shape":"MlTools"} + }, "HolidayConfig":{ "type":"list", "member":{"shape":"HolidayConfigAttributes"}, @@ -18878,6 +19769,10 @@ "shape":"HubContentArn", "documentation":"

The Amazon Resource Name (ARN) of the hub content.

" }, + "SageMakerPublicHubContentArn":{ + "shape":"SageMakerPublicHubContentArn", + "documentation":"

The ARN of the public hub content.

" + }, "HubContentVersion":{ "shape":"HubContentVersion", "documentation":"

The version of the hub content.

" @@ -18898,6 +19793,10 @@ "shape":"HubContentDescription", "documentation":"

A description of the hub content.

" }, + "SupportStatus":{ + "shape":"HubContentSupportStatus", + "documentation":"

The support status of the hub content.

" + }, "HubContentSearchKeywords":{ "shape":"HubContentSearchKeywordList", "documentation":"

The searchable keywords for the hub content.

" @@ -18909,6 +19808,10 @@ "CreationTime":{ "shape":"Timestamp", "documentation":"

The date and time that the hub content was created.

" + }, + "OriginalCreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the hub content was originally created, before any updates or revisions.

" } }, "documentation":"

Information about hub content.

" @@ -18919,8 +19822,7 @@ }, "HubContentMarkdown":{ "type":"string", - "max":65535, - "pattern":".*" + "max":65535 }, "HubContentName":{ "type":"string", @@ -18950,11 +19852,19 @@ "DeleteFailed" ] }, + "HubContentSupportStatus":{ + "type":"string", + "enum":[ + "Supported", + "Deprecated" + ] + }, "HubContentType":{ "type":"string", "enum":[ "Model", - "Notebook" + "Notebook", + "ModelReference" ] }, "HubContentVersion":{ @@ -19027,6 +19937,10 @@ "max":63, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, + "HubNameOrArn":{ + "type":"string", + "pattern":"^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + }, "HubS3StorageConfig":{ "type":"structure", "members":{ @@ -19914,6 +20828,20 @@ }, "documentation":"

The IAM Identity details associated with the user. These details are associated with model package groups, model packages and project entities only.

" }, + "IamPolicyConstraints":{ + "type":"structure", + "members":{ + "SourceIp":{ + "shape":"EnabledOrDisabled", + "documentation":"

When SourceIp is Enabled the worker's IP address when a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. This IP address is checked by Amazon S3 and must match in order for the Amazon S3 resource to be rendered in the worker portal.

" + }, + "VpcSourceIp":{ + "shape":"EnabledOrDisabled", + "documentation":"

When VpcSourceIp is Enabled the worker's IP address when a task is rendered in private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. To render the task successfully Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint, and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring private worker portal, see Use Amazon VPC mode from a private worker portal.

" + } + }, + "documentation":"

Use this parameter to specify a supported global condition key that is added to the IAM policy.

" + }, "IdempotencyToken":{ "type":"string", "max":128, @@ -20216,7 +21144,7 @@ "documentation":"

The version of the hub content schema to import.

" }, "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to import content into.

" }, "HubContentDisplayName":{ @@ -20640,6 +21568,17 @@ "type":"string", "enum":["ShadowMode"] }, + "InferenceHubAccessConfig":{ + "type":"structure", + "required":["HubContentArn"], + "members":{ + "HubContentArn":{ + "shape":"HubContentArn", + "documentation":"

The ARN of the hub content for which deployment access is allowed.

" + } + }, + "documentation":"

Configuration information specifying which hub contents have accessible deployment options.

" + }, "InferenceImage":{ "type":"string", "max":256 @@ -20665,7 +21604,6 @@ "InferenceRecommendation":{ "type":"structure", "required":[ - "Metrics", "EndpointConfiguration", "ModelConfiguration" ], @@ -21193,6 +22131,13 @@ "type":"string", "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:rolealias/?[a-zA-Z_0-9+=,.@\\-_/]+$" }, + "IsTrackingServerActive":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, "ItemIdentifierAttributeName":{ "type":"string", "max":256, @@ -21268,6 +22213,10 @@ "CodeRepositories":{ "shape":"CodeRepositories", "documentation":"

A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.

" + }, + "EmrSettings":{ + "shape":"EmrSettings", + "documentation":"

The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.

" } }, "documentation":"

The settings for the JupyterLab application.

" @@ -22012,7 +22961,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -22712,7 +23661,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } } }, @@ -23124,7 +24073,7 @@ ], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to list the content versions of.

" }, "HubContentType":{ @@ -23191,7 +24140,7 @@ ], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to list the contents of.

" }, "HubContentType":{ @@ -23917,6 +24866,56 @@ "type":"integer", "max":100 }, + "ListMlflowTrackingServersRequest":{ + "type":"structure", + "members":{ + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

Use the CreatedAfter filter to only list tracking servers created after a specific date and time. Listed tracking servers are shown with a date and time such as \"2024-03-16T01:46:56+00:00\". The CreatedAfter parameter takes in a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

Use the CreatedBefore filter to only list tracking servers created before a specific date and time. Listed tracking servers are shown with a date and time such as \"2024-03-16T01:46:56+00:00\". The CreatedBefore parameter takes in a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "TrackingServerStatus":{ + "shape":"TrackingServerStatus", + "documentation":"

Filter for tracking servers with a specified creation status.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

Filter for tracking servers using the specified MLflow version.

" + }, + "SortBy":{ + "shape":"SortTrackingServerBy", + "documentation":"

Filter for tracking servers, sorting by name, creation time, or creation status.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

Change the order of the listed tracking servers. By default, tracking servers are listed in Descending order by creation time. To change the list order, you can specify SortOrder to be Ascending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of tracking servers to list.

" + } + } + }, + "ListMlflowTrackingServersResponse":{ + "type":"structure", + "members":{ + "TrackingServerSummaries":{ + "shape":"TrackingServerSummaryList", + "documentation":"

A list of tracking servers according to chosen filters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + } + } + }, "ListModelBiasJobDefinitionsRequest":{ "type":"structure", "members":{ @@ -24243,6 +25242,10 @@ "SortOrder":{ "shape":"SortOrder", "documentation":"

The sort order for results. The default is Ascending.

" + }, + "CrossAccountFilterOption":{ + "shape":"CrossAccountFilterOption", + "documentation":"

A filter that returns either model groups shared with you or model groups in your own account. When the value is CrossAccount, the results show the resources made discoverable to you from other accounts. When the value is SameAccount or null, the results show resources from your account. The default is SameAccount.

" } } }, @@ -24776,6 +25779,78 @@ } } }, + "ListOptimizationJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that you use to get the next set of results following a truncated response. If the response to the previous request was truncated, that response provides the value for this token.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of optimization jobs to return in the response. The default is 50.

", + "box":true + }, + "CreationTimeAfter":{ + "shape":"CreationTime", + "documentation":"

Filters the results to only those optimization jobs that were created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"CreationTime", + "documentation":"

Filters the results to only those optimization jobs that were created before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"LastModifiedTime", + "documentation":"

Filters the results to only those optimization jobs that were updated after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"LastModifiedTime", + "documentation":"

Filters the results to only those optimization jobs that were updated before the specified time.

" + }, + "OptimizationContains":{ + "shape":"NameContains", + "documentation":"

Filters the results to only those optimization jobs that apply the specified optimization techniques. You can specify either Quantization or Compilation.

" + }, + "NameContains":{ + "shape":"NameContains", + "documentation":"

Filters the results to only those optimization jobs with a name that contains the specified string.

" + }, + "StatusEquals":{ + "shape":"OptimizationJobStatus", + "documentation":"

Filters the results to only those optimization jobs with the specified status.

" + }, + "SortBy":{ + "shape":"ListOptimizationJobsSortBy", + "documentation":"

The field by which to sort the optimization jobs in the response. The default is CreationTime

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending

" + } + } + }, + "ListOptimizationJobsResponse":{ + "type":"structure", + "required":["OptimizationJobSummaries"], + "members":{ + "OptimizationJobSummaries":{ + "shape":"OptimizationJobSummaries", + "documentation":"

A list of optimization jobs and their properties that matches any of the filters you specified in the request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use in a subsequent request to get the next set of results following a truncated response.

" + } + } + }, + "ListOptimizationJobsSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ @@ -25096,7 +26171,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -25583,7 +26658,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -26014,6 +27089,30 @@ "max":1, "pattern":"1|2" }, + "MlTools":{ + "type":"string", + "enum":[ + "DataWrangler", + "FeatureStore", + "EmrClusters", + "AutoMl", + "Experiments", + "Training", + "ModelEvaluation", + "Pipelines", + "Models", + "JumpStart", + "InferenceRecommender", + "Endpoints", + "Projects", + "InferenceOptimization" + ] + }, + "MlflowVersion":{ + "type":"string", + "max":16, + "pattern":"^[0-9]*.[0-9]*.[0-9]*" + }, "Model":{ "type":"structure", "members":{ @@ -26458,6 +27557,20 @@ }, "documentation":"

Configures the timeout and maximum number of retries for processing a transform job invocation.

" }, + "ModelCompilationConfig":{ + "type":"structure", + "members":{ + "Image":{ + "shape":"OptimizationContainerImage", + "documentation":"

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + }, + "OverrideEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

Environment variables that override the default ones in the model container.

" + } + }, + "documentation":"

Settings for the model compilation technique that's applied by a model optimization job.

" + }, "ModelCompressionType":{ "type":"string", "enum":[ @@ -27020,6 +28133,8 @@ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" }, + "SecurityConfig":{"shape":"ModelPackageSecurityConfig"}, + "ModelCard":{"shape":"ModelPackageModelCard"}, "Tags":{ "shape":"TagList", "documentation":"

A list of the tags associated with the model package. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

" @@ -27209,6 +28324,31 @@ "type":"list", "member":{"shape":"ModelPackageGroupSummary"} }, + "ModelPackageModelCard":{ + "type":"structure", + "members":{ + "ModelCardContent":{ + "shape":"ModelCardContent", + "documentation":"

The content of the model card. The content must follow the schema described in Model Package Model Card Schema.

" + }, + "ModelCardStatus":{ + "shape":"ModelCardStatus", + "documentation":"

The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.

  • Draft: The model card is a work in progress.

  • PendingReview: The model card is pending review.

  • Approved: The model card is approved.

  • Archived: The model card is archived. No more updates can be made to the model card content. If you try to update the model card content, you will receive the message Model Card is in Archived state.

" + } + }, + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version.

" + }, + "ModelPackageSecurityConfig":{ + "type":"structure", + "required":["KmsKeyId"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + } + }, + "documentation":"

An optional Key Management Service key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with highly sensitive data.

" + }, "ModelPackageSortBy":{ "type":"string", "enum":[ @@ -27455,6 +28595,20 @@ }, "documentation":"

The input for the model quality monitoring job. Currently endpoints are supported for input for model quality monitoring jobs.

" }, + "ModelQuantizationConfig":{ + "type":"structure", + "members":{ + "Image":{ + "shape":"OptimizationContainerImage", + "documentation":"

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + }, + "OverrideEnvironment":{ + "shape":"OptimizationJobEnvironmentVariables", + "documentation":"

Environment variables that override the default ones in the model container.

" + } + }, + "documentation":"

Settings for the model quantization technique that's applied by a model optimization job.

" + }, "ModelRegisterSettings":{ "type":"structure", "members":{ @@ -28802,6 +29956,14 @@ "JwksUri":{ "shape":"OidcEndpoint", "documentation":"

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access.

" + }, + "AuthenticationRequestExtraParams":{ + "shape":"AuthenticationRequestExtraParams", + "documentation":"

A string to string map of identifiers specific to the custom identity provider (IdP) being used.

" } }, "documentation":"

Use this parameter to configure your OIDC Identity Provider (IdP).

" @@ -28836,6 +29998,14 @@ "JwksUri":{ "shape":"OidcEndpoint", "documentation":"

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access.

" + }, + "AuthenticationRequestExtraParams":{ + "shape":"AuthenticationRequestExtraParams", + "documentation":"

A string to string map of identifiers specific to the custom identity provider (IdP) being used.

" } }, "documentation":"

Your OIDC IdP workforce configuration.

" @@ -28913,6 +30083,244 @@ "In" ] }, + "OptimizationConfig":{ + "type":"structure", + "members":{ + "ModelQuantizationConfig":{ + "shape":"ModelQuantizationConfig", + "documentation":"

Settings for the model quantization technique that's applied by a model optimization job.

" + }, + "ModelCompilationConfig":{ + "shape":"ModelCompilationConfig", + "documentation":"

Settings for the model compilation technique that's applied by a model optimization job.

" + } + }, + "documentation":"

Settings for an optimization technique that you apply with a model optimization job.

", + "union":true + }, + "OptimizationConfigs":{ + "type":"list", + "member":{"shape":"OptimizationConfig"}, + "max":10 + }, + "OptimizationContainerImage":{ + "type":"string", + "max":255, + "pattern":"[\\S]+" + }, + "OptimizationJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/.*" + }, + "OptimizationJobDeploymentInstanceType":{ + "type":"string", + "enum":[ + "ml.p4d.24xlarge", + "ml.p4de.24xlarge", + "ml.p5.48xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.12xlarge", + "ml.g5.16xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.12xlarge", + "ml.g6.16xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.inf2.xlarge", + "ml.inf2.8xlarge", + "ml.inf2.24xlarge", + "ml.inf2.48xlarge", + "ml.trn1.2xlarge", + "ml.trn1.32xlarge", + "ml.trn1n.32xlarge" + ] + }, + "OptimizationJobEnvironmentVariables":{ + "type":"map", + "key":{"shape":"NonEmptyString256"}, + "value":{"shape":"String256"}, + "max":25 + }, + "OptimizationJobModelSource":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"OptimizationJobModelSourceS3", + "documentation":"

The Amazon S3 location of a source model to optimize with an optimization job.

" + } + }, + "documentation":"

The location of the source model to optimize with an optimization job.

" + }, + "OptimizationJobModelSourceS3":{ + "type":"structure", + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

An Amazon S3 URI that locates a source model to optimize with an optimization job.

" + }, + "ModelAccessConfig":{ + "shape":"OptimizationModelAccessConfig", + "documentation":"

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + } + }, + "documentation":"

The Amazon S3 location of a source model to optimize with an optimization job.

" + }, + "OptimizationJobOutputConfig":{ + "type":"structure", + "required":["S3OutputLocation"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses the key to encrypt the artifacts of the optimized model when SageMaker uploads the model to Amazon S3.

" + }, + "S3OutputLocation":{ + "shape":"S3Uri", + "documentation":"

The Amazon S3 URI for where to store the optimized model that you create with an optimization job.

" + } + }, + "documentation":"

Details for where to store the optimized model that you create with the optimization job.

" + }, + "OptimizationJobStatus":{ + "type":"string", + "enum":[ + "INPROGRESS", + "COMPLETED", + "FAILED", + "STARTING", + "STOPPING", + "STOPPED" + ] + }, + "OptimizationJobSummaries":{ + "type":"list", + "member":{"shape":"OptimizationJobSummary"} + }, + "OptimizationJobSummary":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "OptimizationJobArn", + "CreationTime", + "OptimizationJobStatus", + "DeploymentInstanceType", + "OptimizationTypes" + ], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + }, + "OptimizationJobArn":{ + "shape":"OptimizationJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the optimization job.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time when you created the optimization job.

" + }, + "OptimizationJobStatus":{ + "shape":"OptimizationJobStatus", + "documentation":"

The current status of the optimization job.

" + }, + "OptimizationStartTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job started.

" + }, + "OptimizationEndTime":{ + "shape":"Timestamp", + "documentation":"

The time when the optimization job finished processing.

" + }, + "LastModifiedTime":{ + "shape":"LastModifiedTime", + "documentation":"

The time when the optimization job was last updated.

" + }, + "DeploymentInstanceType":{ + "shape":"OptimizationJobDeploymentInstanceType", + "documentation":"

The type of instance that hosts the optimized model that you create with the optimization job.

" + }, + "OptimizationTypes":{ + "shape":"OptimizationTypes", + "documentation":"

The optimization techniques that are applied by the optimization job.

" + } + }, + "documentation":"

Summarizes an optimization job by providing some of its key properties.

" + }, + "OptimizationModelAcceptEula":{"type":"boolean"}, + "OptimizationModelAccessConfig":{ + "type":"structure", + "required":["AcceptEula"], + "members":{ + "AcceptEula":{ + "shape":"OptimizationModelAcceptEula", + "documentation":"

Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

" + } + }, + "documentation":"

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + }, + "OptimizationOutput":{ + "type":"structure", + "members":{ + "RecommendedInferenceImage":{ + "shape":"OptimizationContainerImage", + "documentation":"

The image that SageMaker recommends that you use to host the optimized model that you created with an optimization job.

" + } + }, + "documentation":"

Output values produced by an optimization job.

" + }, + "OptimizationType":{"type":"string"}, + "OptimizationTypes":{ + "type":"list", + "member":{"shape":"OptimizationType"} + }, + "OptimizationVpcConfig":{ + "type":"structure", + "required":[ + "SecurityGroupIds", + "Subnets" + ], + "members":{ + "SecurityGroupIds":{ + "shape":"OptimizationVpcSecurityGroupIds", + "documentation":"

The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.

" + }, + "Subnets":{ + "shape":"OptimizationVpcSubnets", + "documentation":"

The ID of the subnets in the VPC to which you want to connect your optimized model.

" + } + }, + "documentation":"

A VPC in Amazon VPC that's accessible to an optimized model that you create with an optimization job. You can control access to and from your resources by configuring a VPC. For more information, see Give SageMaker Access to Resources in your Amazon VPC.

" + }, + "OptimizationVpcSecurityGroupId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSecurityGroupIds":{ + "type":"list", + "member":{"shape":"OptimizationVpcSecurityGroupId"}, + "max":5, + "min":1 + }, + "OptimizationVpcSubnetId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSubnets":{ + "type":"list", + "member":{"shape":"OptimizationVpcSubnetId"}, + "max":16, + "min":1 + }, "OptionalDouble":{"type":"double"}, "OptionalInteger":{"type":"integer"}, "OptionalVolumeSizeInGB":{ @@ -29572,6 +30980,14 @@ "AutoMLJob":{ "shape":"AutoMLJobStepMetadata", "documentation":"

The Amazon Resource Name (ARN) of the AutoML job that was run by this step.

" + }, + "Endpoint":{ + "shape":"EndpointStepMetadata", + "documentation":"

The endpoint that was invoked during this step execution.

" + }, + "EndpointConfig":{ + "shape":"EndpointConfigStepMetadata", + "documentation":"

The endpoint configuration used to create an endpoint during this step execution.

" } }, "documentation":"

Metadata for a step execution.

" @@ -29860,7 +31276,23 @@ "ml.g4dn.4xlarge", "ml.g4dn.8xlarge", "ml.g4dn.12xlarge", - "ml.g4dn.16xlarge" + "ml.g4dn.16xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.16xlarge", + "ml.g5.12xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.r5d.large", + "ml.r5d.xlarge", + "ml.r5d.2xlarge", + "ml.r5d.4xlarge", + "ml.r5d.8xlarge", + "ml.r5d.12xlarge", + "ml.r5d.16xlarge", + "ml.r5d.24xlarge" ] }, "ProcessingJob":{ @@ -30149,7 +31581,6 @@ "type":"structure", "required":[ "S3Uri", - "LocalPath", "S3UploadMode" ], "members":{ @@ -30266,6 +31697,10 @@ "RoutingConfig":{ "shape":"ProductionVariantRoutingConfig", "documentation":"

Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.

" + }, + "InferenceAmiVersion":{ + "shape":"ProductionVariantInferenceAmiVersion", + "documentation":"

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

The AMI version names, and their configurations, are the following:

al2-ami-sagemaker-inference-gpu-2
  • Accelerator: GPU

  • NVIDIA driver version: 535.54.03

  • CUDA driver version: 12.2

  • Supported instance types: ml.g4dn.*, ml.g5.*, ml.g6.*, ml.p3.*, ml.p4d.*, ml.p4de.*, ml.p5.*

" } }, "documentation":"

Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants.

" @@ -30301,6 +31736,10 @@ }, "documentation":"

Specifies configuration for a core dump from the model container when the process crashes.

" }, + "ProductionVariantInferenceAmiVersion":{ + "type":"string", + "enum":["al2-ami-sagemaker-inference-gpu-2"] + }, "ProductionVariantInstanceType":{ "type":"string", "enum":[ @@ -31029,6 +32468,10 @@ } } }, + "QProfileArn":{ + "type":"string", + "pattern":"^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + }, "QualityCheckStepMetadata":{ "type":"structure", "members":{ @@ -31632,12 +33075,6 @@ }, "RecommendationMetrics":{ "type":"structure", - "required":[ - "CostPerHour", - "CostPerInference", - "MaxInvocations", - "ModelLatency" - ], "members":{ "CostPerHour":{ "shape":"Float", @@ -31770,6 +33207,12 @@ "min":1, "pattern":".*" }, + "ReferenceMinVersion":{ + "type":"string", + "max":14, + "min":5, + "pattern":"^\\d{1,4}.\\d{1,4}.\\d{1,4}$" + }, "RegisterDevicesRequest":{ "type":"structure", "required":[ @@ -32364,6 +33807,10 @@ "ModelAccessConfig":{ "shape":"ModelAccessConfig", "documentation":"

Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

" + }, + "HubAccessConfig":{ + "shape":"InferenceHubAccessConfig", + "documentation":"

Configuration information for hub access.

" } }, "documentation":"

Specifies the S3 location of ML model data to deploy.

" @@ -32385,6 +33832,16 @@ "max":1024, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, + "S3Presign":{ + "type":"structure", + "members":{ + "IamPolicyConstraints":{ + "shape":"IamPolicyConstraints", + "documentation":"

Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp.

" + } + }, + "documentation":"

This object defines the access restrictions to Amazon S3 resources that are included in custom worker task templates using the Liquid filter, grant_read_access.

To learn more about how custom templates are created, see Create custom worker task templates.

" + }, "S3StorageConfig":{ "type":"structure", "required":["S3Uri"], @@ -32419,6 +33876,11 @@ "type":"list", "member":{"shape":"SageMakerImageVersionAlias"} }, + "SageMakerPublicHubContentArn":{ + "type":"string", + "max":255, + "pattern":"^arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}$" + }, "SagemakerServicecatalogStatus":{ "type":"string", "enum":[ @@ -32513,6 +33975,11 @@ "Stopped" ] }, + "Scope":{ + "type":"string", + "max":1024, + "pattern":"^[!#-\\[\\]-~]+( [!#-\\[\\]-~]+)*$" + }, "SearchExpression":{ "type":"structure", "members":{ @@ -33080,6 +34547,14 @@ "CreationTime" ] }, + "SortTrackingServerBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "SortTrialComponentsBy":{ "type":"string", "enum":[ @@ -33139,7 +34614,7 @@ "documentation":"

A list of one to ten Classless Inter-Domain Routing (CIDR) values.

Maximum: Ten CIDR values

The following Length Constraints apply to individual CIDR values in the CIDR value list.

" } }, - "documentation":"

A list of IP address ranges (CIDRs). Used to create an allow list of IP addresses for a private workforce. Workers will only be able to login to their worker portal from an IP address within this range. By default, a workforce isn't restricted to specific IP addresses.

" + "documentation":"

A list of IP address ranges (CIDRs). Used to create an allow list of IP addresses for a private workforce. Workers will only be able to log in to their worker portal from an IP address within this range. By default, a workforce isn't restricted to specific IP addresses.

" }, "SourceType":{ "type":"string", @@ -33403,6 +34878,25 @@ } } }, + "StartMlflowTrackingServerRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the tracking server to start.

" + } + } + }, + "StartMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of the started tracking server.

" + } + } + }, "StartMonitoringScheduleRequest":{ "type":"structure", "required":["MonitoringScheduleName"], @@ -33630,6 +35124,25 @@ } } }, + "StopMlflowTrackingServerRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the tracking server to stop.

" + } + } + }, + "StopMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of the stopped tracking server.

" + } + } + }, "StopMonitoringScheduleRequest":{ "type":"structure", "required":["MonitoringScheduleName"], @@ -33650,6 +35163,16 @@ } } }, + "StopOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{ + "shape":"EntityName", + "documentation":"

The name that you assigned to the optimization job.

" + } + } + }, "StopPipelineExecutionRequest":{ "type":"structure", "required":[ @@ -33723,7 +35246,7 @@ "documentation":"

The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.

" } }, - "documentation":"

Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" + "documentation":"

Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" }, "StorageType":{ "type":"string", @@ -33842,6 +35365,20 @@ "DISABLED" ] }, + "StudioWebPortalSettings":{ + "type":"structure", + "members":{ + "HiddenMlTools":{ + "shape":"HiddenMlToolsList", + "documentation":"

The machine learning tools that are hidden from the Studio left navigation pane.

" + }, + "HiddenAppTypes":{ + "shape":"HiddenAppTypesList", + "documentation":"

The Applications supported in Studio that are hidden from the Studio left navigation pane.

" + } + }, + "documentation":"

Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level.

" + }, "SubnetId":{ "type":"string", "max":32, @@ -33878,7 +35415,7 @@ "documentation":"

Marketplace product listing ID.

" } }, - "documentation":"

Describes a work team of a vendor that does the a labelling job.

" + "documentation":"

Describes a work team of a vendor that does the labelling job.

" }, "SubscribedWorkteams":{ "type":"list", @@ -34403,7 +35940,8 @@ "HolidayConfig":{ "shape":"HolidayConfig", "documentation":"

The collection of holiday featurization attributes used to incorporate national holiday information into your forecasting model.

" - } + }, + "CandidateGenerationConfig":{"shape":"CandidateGenerationConfig"} }, "documentation":"

The collection of settings used by an AutoML job V2 for the time-series forecasting problem type.

" }, @@ -34441,6 +35979,91 @@ "max":256, "min":1 }, + "TrackingServerArn":{ + "type":"string", + "max":2048, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-tracking-server/.*" + }, + "TrackingServerName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" + }, + "TrackingServerSize":{ + "type":"string", + "enum":[ + "Small", + "Medium", + "Large" + ] + }, + "TrackingServerStatus":{ + "type":"string", + "enum":[ + "Creating", + "Created", + "CreateFailed", + "Updating", + "Updated", + "UpdateFailed", + "Deleting", + "DeleteFailed", + "Stopping", + "Stopped", + "StopFailed", + "Starting", + "Started", + "StartFailed", + "MaintenanceInProgress", + "MaintenanceComplete", + "MaintenanceFailed" + ] + }, + "TrackingServerSummary":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of a listed tracking server.

" + }, + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of a listed tracking server.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of a listed tracking server.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last modified time of a listed tracking server.

" + }, + "TrackingServerStatus":{ + "shape":"TrackingServerStatus", + "documentation":"

The creation status of a listed tracking server.

" + }, + "IsActive":{ + "shape":"IsTrackingServerActive", + "documentation":"

The activity status of a listed tracking server.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

The MLflow version used for a listed tracking server.

" + } + }, + "documentation":"

The summary of the tracking server to list.

" + }, + "TrackingServerSummaryList":{ + "type":"list", + "member":{"shape":"TrackingServerSummary"}, + "max":100, + "min":0 + }, + "TrackingServerUrl":{ + "type":"string", + "max":2048 + }, "TrafficDurationInSeconds":{ "type":"integer", "min":1 @@ -34641,7 +36264,27 @@ "ml.c6i.12xlarge", "ml.c6i.16xlarge", "ml.c6i.24xlarge", - "ml.c6i.32xlarge" + "ml.c6i.32xlarge", + "ml.r5d.large", + "ml.r5d.xlarge", + "ml.r5d.2xlarge", + "ml.r5d.4xlarge", + "ml.r5d.8xlarge", + "ml.r5d.12xlarge", + "ml.r5d.16xlarge", + "ml.r5d.24xlarge", + "ml.t3.medium", + "ml.t3.large", + "ml.t3.xlarge", + "ml.t3.2xlarge", + "ml.r5.large", + "ml.r5.xlarge", + "ml.r5.2xlarge", + "ml.r5.4xlarge", + "ml.r5.8xlarge", + "ml.r5.12xlarge", + "ml.r5.16xlarge", + "ml.r5.24xlarge" ] }, "TrainingInstanceTypes":{ @@ -36444,7 +38087,7 @@ "required":["HubName"], "members":{ "HubName":{ - "shape":"HubName", + "shape":"HubNameOrArn", "documentation":"

The name of the hub to update.

" }, "HubDescription":{ @@ -36664,6 +38307,41 @@ } } }, + "UpdateMlflowTrackingServerRequest":{ + "type":"structure", + "required":["TrackingServerName"], + "members":{ + "TrackingServerName":{ + "shape":"TrackingServerName", + "documentation":"

The name of the MLflow Tracking Server to update.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The new S3 URI for the general purpose bucket to use as the artifact store for the MLflow Tracking Server.

" + }, + "TrackingServerSize":{ + "shape":"TrackingServerSize", + "documentation":"

The new size for the MLflow Tracking Server.

" + }, + "AutomaticModelRegistration":{ + "shape":"Boolean", + "documentation":"

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The new weekly maintenance window start day and time to update. The maintenance window day and time should be in Coordinated Universal Time (UTC) 24-hour standard time. For example: Tue:03:30.

" + } + } + }, + "UpdateMlflowTrackingServerResponse":{ + "type":"structure", + "members":{ + "TrackingServerArn":{ + "shape":"TrackingServerArn", + "documentation":"

The ARN of the updated MLflow Tracking Server.

" + } + } + }, "UpdateModelCardRequest":{ "type":"structure", "required":["ModelCardName"], @@ -36727,6 +38405,10 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version.

" } } }, @@ -37237,6 +38919,10 @@ "NotificationConfiguration":{ "shape":"NotificationConfiguration", "documentation":"

Configures SNS topic notifications for available or expiring work items

" + }, + "WorkerAccessConfiguration":{ + "shape":"WorkerAccessConfiguration", + "documentation":"

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" } } }, @@ -37402,6 +39088,10 @@ "CustomFileSystemConfigs":{ "shape":"CustomFileSystemConfigs", "documentation":"

The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.

" + }, + "StudioWebPortalSettings":{ + "shape":"StudioWebPortalSettings", + "documentation":"

Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level.

" } }, "documentation":"

A collection of settings that apply to users in a domain. These settings are specified when the CreateUserProfile API is called, and as DefaultUserSettings when the CreateDomain API is called.

SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain.

" @@ -37589,7 +39279,7 @@ "VpcOnlyTrustedAccounts":{ "type":"list", "member":{"shape":"AccountId"}, - "max":10 + "max":20 }, "VpcSecurityGroupIds":{ "type":"list", @@ -37630,6 +39320,21 @@ }, "documentation":"

Status and billing information about the warm pool.

" }, + "WeeklyMaintenanceWindowStart":{ + "type":"string", + "max":9, + "pattern":"(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)" + }, + "WorkerAccessConfiguration":{ + "type":"structure", + "members":{ + "S3Presign":{ + "shape":"S3Presign", + "documentation":"

Defines any Amazon S3 resource constraints.

" + } + }, + "documentation":"

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" + }, "Workforce":{ "type":"structure", "required":[ @@ -37742,7 +39447,7 @@ }, "SecurityGroupIds":{ "shape":"WorkforceSecurityGroupIds", - "documentation":"

The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

" + "documentation":"

The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

" }, "Subnets":{ "shape":"WorkforceSubnets", @@ -37855,6 +39560,10 @@ "NotificationConfiguration":{ "shape":"NotificationConfiguration", "documentation":"

Configures SNS notifications of available or expiring work items for work teams.

" + }, + "WorkerAccessConfiguration":{ + "shape":"WorkerAccessConfiguration", + "documentation":"

Describes any access constraints that have been defined for Amazon S3 resources.

" } }, "documentation":"

Provides details about a labeling work team.

" diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 39a795e726..2e5e318248 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"secretsmanager", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Secrets Manager", "serviceId":"Secrets Manager", "signatureVersion":"v4", "signingName":"secretsmanager", "targetPrefix":"secretsmanager", - "uid":"secretsmanager-2017-10-17" + "uid":"secretsmanager-2017-10-17", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchGetSecretValue":{ @@ -67,7 +69,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.

For information about creating a secret in the console, see Create a secret.

To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret.

If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

" + "documentation":"

Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.

For information about creating a secret in the console, see Create a secret.

To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret.

If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.

" }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -232,7 +234,7 @@ {"shape":"InternalServiceError"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value.

We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value.

We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.

" }, "RemoveRegionsFromReplication":{ "name":"RemoveRegionsFromReplication", @@ -364,7 +366,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information, see Secret encryption and decryption.

" + "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information, see Secret encryption and decryption.

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -529,11 +531,11 @@ }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

Either SecretString or SecretBinary must have a value, but not both.

This parameter is not available in the Secrets Manager console.

" + "documentation":"

The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

Either SecretString or SecretBinary must have a value, but not both.

This parameter is not available in the Secrets Manager console.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

Either SecretString or SecretBinary must have a value, but not both.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.

" + "documentation":"

The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

Either SecretString or SecretBinary must have a value, but not both.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "Tags":{ "shape":"TagListType", @@ -673,7 +675,7 @@ }, "RotationEnabled":{ "shape":"RotationEnabledType", - "documentation":"

Specifies whether automatic rotation is turned on for this secret.

To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret.

", + "documentation":"

Specifies whether automatic rotation is turned on for this secret. If the secret has never been configured for rotation, Secrets Manager returns null.

To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret.

", "box":true }, "RotationLambdaARN":{ @@ -892,7 +894,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The ARN or name of the secret to retrieve.

For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN.

" + "documentation":"

The ARN or name of the secret to retrieve. To retrieve a secret from another account, you must use an ARN.

For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN.

" }, "VersionId":{ "shape":"SecretVersionIdType", @@ -921,11 +923,11 @@ }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.

If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead.

" + "documentation":"

The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.

If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console.

If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs.

" + "documentation":"

The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console.

If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "VersionStages":{ "shape":"SecretVersionStagesType", @@ -1188,15 +1190,19 @@ }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

You must include SecretBinary or SecretString, but not both.

You can't access this value from the Secrets Manager console.

" + "documentation":"

The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

You must include SecretBinary or SecretString, but not both.

You can't access this value from the Secrets Manager console.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

The text to encrypt and store in the new version of the secret.

You must include SecretBinary or SecretString, but not both.

We recommend you create the secret string as JSON key/value pairs, as shown in the example.

" + "documentation":"

The text to encrypt and store in the new version of the secret.

You must include SecretBinary or SecretString, but not both.

We recommend you create the secret string as JSON key/value pairs, as shown in the example.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "VersionStages":{ "shape":"SecretVersionStagesType", "documentation":"

A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.

If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify AWSCURRENT, and it is already attached to another version, then Secrets Manager also moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version.

" + }, + "RotationToken":{ + "shape":"RotationTokenType", + "documentation":"

A unique identifier that indicates the source of the request. For cross-account rotation (when you rotate a secret in one account by using a Lambda rotation function in another account) and the Lambda rotation function assumes an IAM role to call Secrets Manager, Secrets Manager validates the identity with the rotation token. For more information, see How rotation works.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } } }, @@ -1410,7 +1416,7 @@ }, "RotateImmediately":{ "shape":"BooleanType", - "documentation":"

Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in RotateSecretRequest$RotationRules.

For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it.

By default, Secrets Manager rotates the secret immediately.

", + "documentation":"

Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in RotateSecretRequest$RotationRules.

For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it.

By default, Secrets Manager rotates the secret immediately.

", "box":true } } @@ -1458,6 +1464,13 @@ }, "documentation":"

A structure that defines the rotation configuration for the secret.

" }, + "RotationTokenType":{ + "type":"string", + "max":256, + "min":36, + "pattern":"^[a-zA-Z0-9\\-]+$", + "sensitive":true + }, "ScheduleExpressionType":{ "type":"string", "max":256, @@ -1797,15 +1810,15 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version.

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases.

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.

" + "documentation":"

The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version.

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases.

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.

" }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

Either SecretBinary or SecretString must have a value, but not both.

You can't access this parameter in the Secrets Manager console.

" + "documentation":"

The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.

Either SecretBinary or SecretString must have a value, but not both.

You can't access this parameter in the Secrets Manager console.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

Either SecretBinary or SecretString must have a value, but not both.

" + "documentation":"

The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

Either SecretBinary or SecretString must have a value, but not both.

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } } }, @@ -1872,7 +1885,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

This field is reserved for internal use.

" + "documentation":"

The ARN or name of the secret with the resource-based policy you want to validate.

" }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", diff --git a/botocore/data/securityhub/2018-10-26/service-2.json b/botocore/data/securityhub/2018-10-26/service-2.json index 870b6f37d5..8781e7fbed 100644 --- a/botocore/data/securityhub/2018-10-26/service-2.json +++ b/botocore/data/securityhub/2018-10-26/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"securityhub", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS SecurityHub", "serviceId":"SecurityHub", "signatureVersion":"v4", "signingName":"securityhub", - "uid":"securityhub-2018-10-26" + "uid":"securityhub-2018-10-26", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptAdministratorInvitation":{ @@ -1489,7 +1491,7 @@ }, "PortName":{ "shape":"NonEmptyString", - "documentation":"

The port name of the local connection.

" + "documentation":"

The port name of the local connection.

Length Constraints: 128.

" } }, "documentation":"

For NetworkConnectionAction and PortProbeDetails, LocalPortDetails provides information about the local port that was involved in the action.

" @@ -1529,7 +1531,7 @@ }, "PortName":{ "shape":"NonEmptyString", - "documentation":"

The port name of the remote connection.

" + "documentation":"

The port name of the remote connection.

Length Constraints: 128.

" } }, "documentation":"

Provides information about the remote port that was involved in an attempted network connection.

" @@ -2270,11 +2272,11 @@ "members":{ "Api":{ "shape":"NonEmptyString", - "documentation":"

The name of the API method that was issued.

" + "documentation":"

The name of the API method that was issued.

Length Constraints: 128.

" }, "ServiceName":{ "shape":"NonEmptyString", - "documentation":"

The name of the Amazon Web Services service that the API method belongs to.

" + "documentation":"

The name of the Amazon Web Services service that the API method belongs to.

Length Constraints: 128.

" }, "CallerType":{ "shape":"NonEmptyString", @@ -2308,7 +2310,7 @@ "members":{ "Domain":{ "shape":"NonEmptyString", - "documentation":"

The name of the DNS domain that issued the API call.

" + "documentation":"

The name of the DNS domain that issued the API call.

Length Constraints: 128.

" } }, "documentation":"

Provided if CallerType is domain. It provides information about the DNS domain that issued the API call.

" @@ -13703,39 +13705,39 @@ "members":{ "SchemaVersion":{ "shape":"NonEmptyString", - "documentation":"

The schema version that a finding is formatted for.

" + "documentation":"

The schema version that a finding is formatted for. The value is 2018-10-08.

" }, "Id":{ "shape":"NonEmptyString", - "documentation":"

The security findings provider-specific identifier for a finding.

" + "documentation":"

The security findings provider-specific identifier for a finding.

Length Constraints: Minimum length of 1. Maximum length of 512.

" }, "ProductArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration.

" + "documentation":"

The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration.

Length Constraints: Minimum length of 12. Maximum length of 2048.

" }, "ProductName":{ "shape":"NonEmptyString", - "documentation":"

The name of the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

When you use the Security Hub console or API to filter findings by product name, you use this attribute.

" + "documentation":"

The name of the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

When you use the Security Hub console or API to filter findings by product name, you use this attribute.

Length Constraints: Minimum length of 1. Maximum length of 128.

" }, "CompanyName":{ "shape":"NonEmptyString", - "documentation":"

The name of the company for the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

When you use the Security Hub console or API to filter findings by company name, you use this attribute.

" + "documentation":"

The name of the company for the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

When you use the Security Hub console or API to filter findings by company name, you use this attribute.

Length Constraints: Minimum length of 1. Maximum length of 128.

" }, "Region":{ "shape":"NonEmptyString", - "documentation":"

The Region from which the finding was generated.

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings.

" + "documentation":"

The Region from which the finding was generated.

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings.

Length Constraints: Minimum length of 1. Maximum length of 16.

" }, "GeneratorId":{ "shape":"NonEmptyString", - "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.

" + "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, or something else.

Length Constraints: Minimum length of 1. Maximum length of 512.

" }, "AwsAccountId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Web Services account ID that a finding is generated in.

" + "documentation":"

The Amazon Web Services account ID that a finding is generated in.

Length Constraints: 12.

" }, "Types":{ "shape":"TypeList", - "documentation":"

One or more finding types in the format of namespace/category/classifier that classify a finding.

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications

" + "documentation":"

One or more finding types in the format of namespace/category/classifier that classify a finding.

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications

Array Members: Maximum number of 50 items.

" }, "FirstObservedAt":{ "shape":"NonEmptyString", @@ -13767,11 +13769,11 @@ }, "Title":{ "shape":"NonEmptyString", - "documentation":"

A finding's title.

In this release, Title is a required property.

" + "documentation":"

A finding's title. Title is a required property.

Length Constraints: Minimum length of 1. Maximum length of 256.

" }, "Description":{ "shape":"NonEmptyString", - "documentation":"

A finding's description.

In this release, Description is a required property.

" + "documentation":"

A finding's description. Description is a required property.

Length Constraints: Minimum length of 1. Maximum length of 1024.

" }, "Remediation":{ "shape":"Remediation", @@ -13787,11 +13789,11 @@ }, "UserDefinedFields":{ "shape":"FieldMap", - "documentation":"

A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.

" + "documentation":"

A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.

Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 1024 characters.

" }, "Malware":{ "shape":"MalwareList", - "documentation":"

A list of malware related to a finding.

" + "documentation":"

A list of malware related to a finding.

Array Members: Maximum number of 5 items.

" }, "Network":{ "shape":"Network", @@ -13807,15 +13809,15 @@ }, "Threats":{ "shape":"ThreatList", - "documentation":"

Details about the threat detected in a security finding and the file paths that were affected by the threat.

" + "documentation":"

Details about the threat detected in a security finding and the file paths that were affected by the threat.

Array Members: Minimum number of 1 item. Maximum number of 32 items.

" }, "ThreatIntelIndicators":{ "shape":"ThreatIntelIndicatorList", - "documentation":"

Threat intelligence details related to a finding.

" + "documentation":"

Threat intelligence details related to a finding.

Array Members: Minimum number of 1 item. Maximum number of 5 items.

" }, "Resources":{ "shape":"ResourceList", - "documentation":"

A set of resource data types that describe the resources that the finding refers to.

" + "documentation":"

A set of resource data types that describe the resources that the finding refers to.

Array Members: Minimum number of 1 item. Maximum number of 32 items.

" }, "Compliance":{ "shape":"Compliance", @@ -13839,7 +13841,7 @@ }, "RelatedFindings":{ "shape":"RelatedFindingList", - "documentation":"

A list of related findings.

" + "documentation":"

A list of related findings.

Array Members: Minimum number of 1 item. Maximum number of 10 items.

" }, "Note":{ "shape":"Note", @@ -13871,11 +13873,11 @@ }, "ProcessedAt":{ "shape":"NonEmptyString", - "documentation":"

A imestamp that indicates when Security Hub received a finding and begins to process it.

This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

  • YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

  • YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

  • YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

" + "documentation":"

A timestamp that indicates when Security Hub received a finding and begins to process it.

This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

  • YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

  • YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

  • YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

" }, "AwsAccountName":{ "shape":"NonEmptyString", - "documentation":"

The name of the Amazon Web Services account from which a finding was generated.

" + "documentation":"

The name of the Amazon Web Services account from which a finding was generated.

Length Constraints: Minimum length of 1. Maximum length of 50.

" } }, "documentation":"

Provides a consistent format for Security Hub findings. AwsSecurityFinding format allows you to share findings between Amazon Web Services security services and third-party solutions.

A finding is a potential security issue generated either by Amazon Web Services services or by the integrated third-party solutions and standards checks.

" @@ -15843,11 +15845,11 @@ "members":{ "Status":{ "shape":"ComplianceStatus", - "documentation":"

The result of a standards check.

The valid values for Status are as follows.

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported for your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" + "documentation":"

The result of a standards check.

The valid values for Status are as follows.

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported for your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE for a Security Hub control, Security Hub automatically archives the finding after 3 days.

" }, "RelatedRequirements":{ "shape":"RelatedRequirementsList", - "documentation":"

For a control, the industry or regulatory framework requirements that are related to the control. The check for that control is aligned with these requirements.

" + "documentation":"

For a control, the industry or regulatory framework requirements that are related to the control. The check for that control is aligned with these requirements.

Array Members: Maximum number of 32 items.

" }, "StatusReasons":{ "shape":"StatusReasonsList", @@ -16849,11 +16851,11 @@ "members":{ "Domain":{ "shape":"NonEmptyString", - "documentation":"

The DNS domain that is associated with the DNS request.

" + "documentation":"

The DNS domain that is associated with the DNS request.

Length Constraints: 128.

" }, "Protocol":{ "shape":"NonEmptyString", - "documentation":"

The protocol that was used for the DNS request.

" + "documentation":"

The protocol that was used for the DNS request.

Length Constraints: Minimum length of 1. Maximum length of 64.

" }, "Blocked":{ "shape":"Boolean", @@ -16991,19 +16993,19 @@ "members":{ "FilePath":{ "shape":"NonEmptyString", - "documentation":"

Path to the infected or suspicious file on the resource it was detected on.

" + "documentation":"

Path to the infected or suspicious file on the resource it was detected on.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" }, "FileName":{ "shape":"NonEmptyString", - "documentation":"

The name of the infected or suspicious file corresponding to the hash.

" + "documentation":"

The name of the infected or suspicious file corresponding to the hash.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" }, "ResourceId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) of the resource on which the threat was detected.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource on which the threat was detected.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" }, "Hash":{ "shape":"NonEmptyString", - "documentation":"

The hash value for the infected or suspicious file.

" + "documentation":"

The hash value for the infected or suspicious file.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" } }, "documentation":"

Provides information about the file paths that were affected by the threat.

" @@ -17120,7 +17122,7 @@ "documentation":"

One or more finding types in the format of namespace/category/classifier that classify a finding.

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications

" } }, - "documentation":"

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update values for confidence, criticality, related findings, severity, and types.

" + "documentation":"

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update values for the following fields:

  • Confidence

  • Criticality

  • RelatedFindings

  • Severity

  • Types

The preceding fields are nested under the FindingProviderFields object, but also have analogues of the same name as top-level ASFF fields. When a new finding is sent to Security Hub by a finding provider, Security Hub populates the FindingProviderFields object automatically, if it is empty, based on the corresponding top-level fields.

Finding providers can update FindingProviderFields only by using the BatchImportFindings operation. Finding providers can't update this object with the BatchUpdateFindings operation. Customers can update the top-level fields by using the BatchUpdateFindings operation. Customers can't update FindingProviderFields.

For information about how Security Hub handles updates from BatchImportFindings to FindingProviderFields and to the corresponding top-level attributes, see Using FindingProviderFields in the Security Hub User Guide.

" }, "FindingProviderSeverity":{ "type":"structure", @@ -17131,10 +17133,10 @@ }, "Original":{ "shape":"NonEmptyString", - "documentation":"

The finding provider's original value for the severity.

" + "documentation":"

The finding provider's original value for the severity.

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, - "documentation":"

The severity assigned to the finding by the finding provider.

" + "documentation":"

The severity assigned to a finding by the finding provider. This object may include one or more of the following attributes:

  • Label

  • Normalized

  • Original

  • Product

If a BatchImportFindings request for a new finding only provides Label or only provides Normalized, Security Hub automatically populates the value of the other field.

The Normalized and Product attributes are included in the FindingProviderSeverity structure to preserve the historical information associated with the finding, even if the top-level Severity object is later modified using the BatchUpdateFindings operation.

If the top-level Finding.Severity object is present, but Finding.FindingProviderFields isn't present, Security Hub creates the FindingProviderFields.Severity object and copies the entire Finding.Severity object into it. This ensures that the original, provider-supplied details are retained within the FindingProviderFields.Severity object, even if the top-level Severity object is overwritten.

" }, "FirewallPolicyDetails":{ "type":"structure", @@ -17225,7 +17227,7 @@ }, "Labels":{ "shape":"TypeList", - "documentation":"

An array of tags used to identify the detector associated with the finding.

" + "documentation":"

An array of tags used to identify the detector associated with the finding.

Array Members: Minimum number of 0 items. Maximum number of 10 items.

" } }, "documentation":"

Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to findings that relate to Lambda functions. Amazon Inspector identifies policy violations and vulnerabilities in Lambda function code based on internal detectors developed in collaboration with Amazon CodeGuru. Security Hub receives those findings.

" @@ -18295,7 +18297,7 @@ "members":{ "Name":{ "shape":"NonEmptyString", - "documentation":"

The name of the malware that was observed.

" + "documentation":"

The name of the malware that was observed.

Length Constraints: Minimum of 1. Maximum of 64.

" }, "Type":{ "shape":"MalwareType", @@ -18303,7 +18305,7 @@ }, "Path":{ "shape":"NonEmptyString", - "documentation":"

The file system path of the malware that was observed.

" + "documentation":"

The file system path of the malware that was observed.

Length Constraints: Minimum of 1. Maximum of 512.

" }, "State":{ "shape":"MalwareState", @@ -18429,7 +18431,7 @@ }, "Protocol":{ "shape":"NonEmptyString", - "documentation":"

The protocol of network-related information about a finding.

" + "documentation":"

The protocol of network-related information about a finding.

Length Constraints: Minimum of 1. Maximum of 16.

" }, "OpenPortRange":{ "shape":"PortRange", @@ -18449,7 +18451,7 @@ }, "SourceDomain":{ "shape":"NonEmptyString", - "documentation":"

The source domain of network-related information about a finding.

" + "documentation":"

The source domain of network-related information about a finding.

Length Constraints: Minimum of 1. Maximum of 128.

" }, "SourceMac":{ "shape":"NonEmptyString", @@ -18469,7 +18471,7 @@ }, "DestinationDomain":{ "shape":"NonEmptyString", - "documentation":"

The destination domain of network-related information about a finding.

" + "documentation":"

The destination domain of network-related information about a finding.

Length Constraints: Minimum of 1. Maximum of 128.

" } }, "documentation":"

The details of network-related information about a finding.

" @@ -18495,7 +18497,7 @@ }, "Protocol":{ "shape":"NonEmptyString", - "documentation":"

The protocol used to make the network connection request.

" + "documentation":"

The protocol used to make the network connection request.

Length Constraints: Minimum length of 1. Maximum length of 64.

" }, "Blocked":{ "shape":"Boolean", @@ -18516,7 +18518,7 @@ "members":{ "Protocol":{ "shape":"NonEmptyString", - "documentation":"

The protocol used for the component.

" + "documentation":"

The protocol used for the component.

Length Constraints: Minimum of 1. Maximum of 16.

" }, "Destination":{ "shape":"NetworkPathComponentDetails", @@ -18534,11 +18536,11 @@ "members":{ "ComponentId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of a component in the network path.

" + "documentation":"

The identifier of a component in the network path.

Length Constraints: Minimum of 1. Maximum of 32.

" }, "ComponentType":{ "shape":"NonEmptyString", - "documentation":"

The type of component.

" + "documentation":"

The type of component.

Length Constraints: Minimum of 1. Maximum of 32.

" }, "Egress":{ "shape":"NetworkHeader", @@ -18588,7 +18590,7 @@ "members":{ "Text":{ "shape":"NonEmptyString", - "documentation":"

The text of a note.

" + "documentation":"

The text of a note.

Length Constraints: Minimum of 1. Maximum of 512.

" }, "UpdatedBy":{ "shape":"NonEmptyString", @@ -18833,31 +18835,31 @@ "members":{ "Id":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the compliance standard that was used to determine the patch compliance status.

" + "documentation":"

The identifier of the compliance standard that was used to determine the patch compliance status.

Length Constraints: Minimum length of 1. Maximum length of 256.

" }, "InstalledCount":{ "shape":"Integer", - "documentation":"

The number of patches from the compliance standard that were installed successfully.

" + "documentation":"

The number of patches from the compliance standard that were installed successfully.

The value can be an integer from 0 to 100000.

" }, "MissingCount":{ "shape":"Integer", - "documentation":"

The number of patches that are part of the compliance standard but are not installed. The count includes patches that failed to install.

" + "documentation":"

The number of patches that are part of the compliance standard but are not installed. The count includes patches that failed to install.

The value can be an integer from 0 to 100000.

" }, "FailedCount":{ "shape":"Integer", - "documentation":"

The number of patches from the compliance standard that failed to install.

" + "documentation":"

The number of patches from the compliance standard that failed to install.

The value can be an integer from 0 to 100000.

" }, "InstalledOtherCount":{ "shape":"Integer", - "documentation":"

The number of installed patches that are not part of the compliance standard.

" + "documentation":"

The number of installed patches that are not part of the compliance standard.

The value can be an integer from 0 to 100000.

" }, "InstalledRejectedCount":{ "shape":"Integer", - "documentation":"

The number of patches that are installed but are also on a list of patches that the customer rejected.

" + "documentation":"

The number of patches that are installed but are also on a list of patches that the customer rejected.

The value can be an integer from 0 to 100000.

" }, "InstalledPendingReboot":{ "shape":"Integer", - "documentation":"

The number of patches that were applied, but that require the instance to be rebooted in order to be marked as installed.

" + "documentation":"

The number of patches that were applied, but that require the instance to be rebooted in order to be marked as installed.

The value can be an integer from 0 to 100000.

" }, "OperationStartTime":{ "shape":"NonEmptyString", @@ -18869,11 +18871,11 @@ }, "RebootOption":{ "shape":"NonEmptyString", - "documentation":"

The reboot option specified for the instance.

" + "documentation":"

The reboot option specified for the instance.

Length Constraints: Minimum length of 1. Maximum length of 256.

" }, "Operation":{ "shape":"NonEmptyString", - "documentation":"

The type of patch operation performed. For Patch Manager, the values are SCAN and INSTALL.

" + "documentation":"

The type of patch operation performed. For Patch Manager, the values are SCAN and INSTALL.

Length Constraints: Minimum length of 1. Maximum length of 256.

" } }, "documentation":"

Provides an overview of the patch compliance status for an instance against a selected compliance standard.

" @@ -18962,11 +18964,11 @@ "members":{ "Name":{ "shape":"NonEmptyString", - "documentation":"

The name of the process.

" + "documentation":"

The name of the process.

Length Constraints: Minimum of 1. Maximum of 64.

" }, "Path":{ "shape":"NonEmptyString", - "documentation":"

The path to the process executable.

" + "documentation":"

The path to the process executable.

Length Constraints: Minimum of 1. Maximum of 512.

" }, "Pid":{ "shape":"Integer", @@ -19084,7 +19086,7 @@ "members":{ "Text":{ "shape":"NonEmptyString", - "documentation":"

Describes the recommended steps to take to remediate an issue identified in a finding.

" + "documentation":"

Describes the recommended steps to take to remediate an issue identified in a finding.

Length Constraints: Minimum of 1 length. Maximum of 512 length.

" }, "Url":{ "shape":"NonEmptyString", @@ -19170,7 +19172,7 @@ "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

The type of the resource that details are provided for. If possible, set Type to one of the supported resource types. For example, if the resource is an EC2 instance, then set Type to AwsEc2Instance.

If the resource does not match any of the provided types, then set Type to Other.

" + "documentation":"

The type of the resource that details are provided for. If possible, set Type to one of the supported resource types. For example, if the resource is an EC2 instance, then set Type to AwsEc2Instance.

If the resource does not match any of the provided types, then set Type to Other.

Length Constraints: Minimum length of 1. Maximum length of 256.

" }, "Id":{ "shape":"NonEmptyString", @@ -19182,7 +19184,7 @@ }, "Region":{ "shape":"NonEmptyString", - "documentation":"

The canonical Amazon Web Services external Region name where this resource is located.

" + "documentation":"

The canonical Amazon Web Services external Region name where this resource is located.

Length Constraints: Minimum length of 1. Maximum length of 16.

" }, "ResourceRole":{ "shape":"NonEmptyString", @@ -19190,7 +19192,7 @@ }, "Tags":{ "shape":"FieldMap", - "documentation":"

A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

" + "documentation":"

A list of Amazon Web Services tags associated with a resource at the time the finding was processed. Tags must follow Amazon Web Services tag naming limits and requirements.

" }, "DataClassification":{ "shape":"DataClassificationDetails", @@ -20331,11 +20333,11 @@ }, "Normalized":{ "shape":"Integer", - "documentation":"

Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label.

If you provide Label and do not provide Normalized, then Normalized is set automatically as follows.

  • INFORMATIONAL - 0

  • LOW - 1

  • MEDIUM - 40

  • HIGH - 70

  • CRITICAL - 90

" + "documentation":"

Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label.

The value of Normalized can be an integer between 0 and 100.

If you provide Label and do not provide Normalized, then Normalized is set automatically as follows.

  • INFORMATIONAL - 0

  • LOW - 1

  • MEDIUM - 40

  • HIGH - 70

  • CRITICAL - 90

" }, "Original":{ "shape":"NonEmptyString", - "documentation":"

The native severity from the finding product that generated the finding.

" + "documentation":"

The native severity from the finding product that generated the finding.

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, "documentation":"

The severity of the finding.

The finding provider can provide the initial severity. The finding provider can only update the severity if it hasn't been updated using BatchUpdateFindings.

The finding must have either Label or Normalized populated. If only one of these attributes is populated, then Security Hub automatically populates the other one. If neither attribute is populated, then the finding is invalid. Label is the preferred attribute.

" @@ -21077,7 +21079,8 @@ "type":"string", "enum":[ "ACCOUNT", - "ORGANIZATIONAL_UNIT" + "ORGANIZATIONAL_UNIT", + "ROOT" ] }, "Threat":{ @@ -21085,11 +21088,11 @@ "members":{ "Name":{ "shape":"NonEmptyString", - "documentation":"

The name of the threat.

" + "documentation":"

The name of the threat.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" }, "Severity":{ "shape":"NonEmptyString", - "documentation":"

The severity of the threat.

" + "documentation":"

The severity of the threat.

Length Constraints: Minimum of 1 length. Maximum of 128 length.

" }, "ItemCount":{ "shape":"Integer", @@ -21097,7 +21100,7 @@ }, "FilePaths":{ "shape":"FilePathList", - "documentation":"

Provides information about the file paths that were affected by the threat.

" + "documentation":"

Provides information about the file paths that were affected by the threat.

Array Members: Minimum number of 1 item. Maximum number of 5 items.

" } }, "documentation":"

Provides information about the threat detected in a security finding and the file paths that were affected by the threat.

" @@ -21111,7 +21114,7 @@ }, "Value":{ "shape":"NonEmptyString", - "documentation":"

The value of a threat intelligence indicator.

" + "documentation":"

The value of a threat intelligence indicator.

Length Constraints: Minimum of 1 length. Maximum of 512 length.

" }, "Category":{ "shape":"ThreatIntelIndicatorCategory", @@ -21123,7 +21126,7 @@ }, "Source":{ "shape":"NonEmptyString", - "documentation":"

The source of the threat intelligence indicator.

" + "documentation":"

The source of the threat intelligence indicator.

Length Constraints: Minimum of 1 length. Maximum of 64 length.

" }, "SourceUrl":{ "shape":"NonEmptyString", diff --git a/botocore/data/securitylake/2018-05-10/service-2.json b/botocore/data/securitylake/2018-05-10/service-2.json index e0fd1f2a33..c1c96cff08 100644 --- a/botocore/data/securitylake/2018-05-10/service-2.json +++ b/botocore/data/securitylake/2018-05-10/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"securitylake", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Security Lake", "serviceId":"SecurityLake", "signatureVersion":"v4", "signingName":"securitylake", - "uid":"securitylake-2018-05-10" + "uid":"securitylake-2018-05-10", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateAwsLogSource":{ @@ -651,7 +653,7 @@ "type":"string", "max":1011, "min":1, - "pattern":"^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" + "pattern":"^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" }, "AwsAccountId":{ "type":"string", @@ -959,7 +961,7 @@ }, "subscriberIdentity":{ "shape":"AwsIdentity", - "documentation":"

The AWS identity used to access your data.

" + "documentation":"

The Amazon Web Services identity used to access your data.

" }, "subscriberName":{ "shape":"CreateSubscriberRequestSubscriberNameString", @@ -1036,7 +1038,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[\\\\\\w\\-_:/.]*$" + "pattern":"^[\\w\\-\\_\\:\\.]*$" }, "CustomLogSourceProvider":{ "type":"structure", @@ -1867,7 +1869,7 @@ }, "Region":{ "type":"string", - "pattern":"^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$" + "pattern":"^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$" }, "RegionList":{ "type":"list", @@ -1964,7 +1966,7 @@ }, "resourceShareArn":{ "shape":"ResourceShareArn", - "documentation":"

The Amazon Resource Name (ARN) which uniquely defines the AWS RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share.

This field is available only for Lake Formation subscribers created after March 8, 2023.

" + "documentation":"

The Amazon Resource Name (ARN) which uniquely defines the Amazon Web Services RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share.

This field is available only for Lake Formation subscribers created after March 8, 2023.

" }, "resourceShareName":{ "shape":"ResourceShareName", @@ -2000,7 +2002,7 @@ }, "subscriberIdentity":{ "shape":"AwsIdentity", - "documentation":"

The AWS identity used to access your data.

" + "documentation":"

The Amazon Web Services identity used to access your data.

" }, "subscriberName":{ "shape":"SafeString", diff --git a/botocore/data/ses/2010-12-01/endpoint-rule-set-1.json b/botocore/data/ses/2010-12-01/endpoint-rule-set-1.json index 1d567c5f97..658dc3ff9e 100644 --- a/botocore/data/ses/2010-12-01/endpoint-rule-set-1.json +++ b/botocore/data/ses/2010-12-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -58,293 +57,258 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, { - "conditions": [], - "type": "tree", + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + 
"supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, true ] } ], - "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://email-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], - "type": "tree", + ], "rules": [ { "conditions": [], "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ] + ], + "type": "tree" + }, + { + "conditions": 
[], + "endpoint": { + "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" + ], + "type": "tree" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/ses/2010-12-01/service-2.json b/botocore/data/ses/2010-12-01/service-2.json index 0e89c30bb7..75573cd7c4 100644 --- a/botocore/data/ses/2010-12-01/service-2.json +++ b/botocore/data/ses/2010-12-01/service-2.json @@ -4,13 +4,15 @@ "apiVersion":"2010-12-01", "endpointPrefix":"email", "protocol":"query", + "protocols":["query"], "serviceAbbreviation":"Amazon SES", "serviceFullName":"Amazon Simple Email Service", "serviceId":"SES", "signatureVersion":"v4", "signingName":"ses", "uid":"email-2010-12-01", - "xmlNamespace":"http://ses.amazonaws.com/doc/2010-12-01/" + "xmlNamespace":"http://ses.amazonaws.com/doc/2010-12-01/", + "auth":["aws.auth#sigv4"] }, "operations":{ "CloneReceiptRuleSet":{ diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index a3a2aa5e81..1b2a0e9c1e 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -11,7 +11,8 @@ "serviceId":"SESv2", "signatureVersion":"v4", "signingName":"ses", - "uid":"sesv2-2019-09-27" + "uid":"sesv2-2019-09-27", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchGetMetricData":{ @@ -78,7 +79,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

A single configuration set can include more than one event destination.

" + "documentation":"

Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.

A single configuration set can include more than one event destination.

" }, "CreateContact":{ "name":"CreateContact", @@ -278,7 +279,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Delete an event destination.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" + "documentation":"

Delete an event destination.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.

" }, "DeleteContact":{ "name":"DeleteContact", @@ -460,7 +461,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Retrieve a list of event destinations that are associated with a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" + "documentation":"

Retrieve a list of event destinations that are associated with a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.

" }, "GetContact":{ "name":"GetContact", @@ -1352,7 +1353,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Update the configuration of an event destination for a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" + "documentation":"

Update the configuration of an event destination for a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.

" }, "UpdateContact":{ "name":"UpdateContact", @@ -3145,6 +3146,17 @@ "type":"list", "member":{"shape":"Esp"} }, + "EventBridgeDestination":{ + "type":"structure", + "required":["EventBusArn"], + "members":{ + "EventBusArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported.

" + } + }, + "documentation":"

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur.

" + }, "EventDestination":{ "type":"structure", "required":[ @@ -3174,7 +3186,11 @@ }, "SnsDestination":{ "shape":"SnsDestination", - "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.

" + "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

" + }, + "EventBridgeDestination":{ + "shape":"EventBridgeDestination", + "documentation":"

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur.

" }, "PinpointDestination":{ "shape":"PinpointDestination", @@ -3204,7 +3220,11 @@ }, "SnsDestination":{ "shape":"SnsDestination", - "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.

" + "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

" + }, + "EventBridgeDestination":{ + "shape":"EventBridgeDestination", + "documentation":"

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur.

" }, "PinpointDestination":{ "shape":"PinpointDestination", @@ -5489,8 +5509,7 @@ "type":"structure", "required":[ "MailType", - "WebsiteURL", - "UseCaseDescription" + "WebsiteURL" ], "members":{ "MailType":{ @@ -6369,7 +6388,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the Amazon SNS topic to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

" } }, - "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.

" + "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

" }, "Subject":{"type":"string"}, "SubscriptionStatus":{ @@ -6939,8 +6958,9 @@ }, "UseCaseDescription":{ "type":"string", + "deprecated":true, + "deprecatedMessage":"Use case description is optional and deprecated", "max":5000, - "min":1, "sensitive":true }, "UseDefaultIfPreferenceUnavailable":{"type":"boolean"}, diff --git a/botocore/data/shield/2016-06-02/endpoint-rule-set-1.json b/botocore/data/shield/2016-06-02/endpoint-rule-set-1.json index 3d3c9c13eb..f53c99a9c8 100644 --- a/botocore/data/shield/2016-06-02/endpoint-rule-set-1.json +++ b/botocore/data/shield/2016-06-02/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -235,7 +233,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -270,7 +267,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -281,14 +277,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,14 +300,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -318,11 +314,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -354,7 +352,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -374,7 +371,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -385,14 
+381,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -403,9 +401,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/shield/2016-06-02/service-2.json b/botocore/data/shield/2016-06-02/service-2.json index 7be50499c0..dbf34f780d 100644 --- a/botocore/data/shield/2016-06-02/service-2.json +++ b/botocore/data/shield/2016-06-02/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"shield", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"AWS Shield", "serviceFullName":"AWS Shield", "serviceId":"Shield", "signatureVersion":"v4", "targetPrefix":"AWSShield_20160616", - "uid":"shield-2016-06-02" + "uid":"shield-2016-06-02", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateDRTLogBucket":{ diff --git a/botocore/data/snowball/2016-06-30/service-2.json b/botocore/data/snowball/2016-06-30/service-2.json index b087c2dd66..68df84088c 100644 --- a/botocore/data/snowball/2016-06-30/service-2.json +++ b/botocore/data/snowball/2016-06-30/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"snowball", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon Snowball", "serviceFullName":"Amazon Import/Export Snowball", "serviceId":"Snowball", "signatureVersion":"v4", "targetPrefix":"AWSIESnowballJobManagementService", - "uid":"snowball-2016-06-30" + "uid":"snowball-2016-06-30", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelCluster":{ diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json index c97124fa76..fda8eca1a7 100644 --- a/botocore/data/sns/2010-03-31/service-2.json +++ b/botocore/data/sns/2010-03-31/service-2.json @@ -4,12 +4,14 @@ "apiVersion":"2010-03-31", "endpointPrefix":"sns", 
"protocol":"query", + "protocols":["query"], "serviceAbbreviation":"Amazon SNS", "serviceFullName":"Amazon Simple Notification Service", "serviceId":"SNS", "signatureVersion":"v4", "uid":"sns-2010-03-31", - "xmlNamespace":"http://sns.amazonaws.com/doc/2010-03-31/" + "xmlNamespace":"http://sns.amazonaws.com/doc/2010-03-31/", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddPermission":{ @@ -84,7 +86,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

  • For ADM, PlatformPrincipal is client id and PlatformCredential is client secret.

  • For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key.

  • For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key.

  • For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key.

  • For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key.

  • For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`.

  • For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key.

  • For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

" + "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

  • For ADM, PlatformPrincipal is client id and PlatformCredential is client secret.

  • For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key.

  • For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key.

  • For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key.

  • For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key.

  • For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`.

  • For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key.

  • For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

" }, "CreatePlatformEndpoint":{ "name":"CreatePlatformEndpoint", @@ -1097,7 +1099,7 @@ }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • FifoTopic – Set to true to create a FIFO topic.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

  • SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1.

  • TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.

The following attribute applies only to server-side encryption:

  • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.

The following attributes apply only to FIFO topics:

  • ArchivePolicy – Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic.

  • BeginningArchiveTime – The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy.

  • ContentBasedDeduplication – Enables content-based deduplication for FIFO topics.

    • By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.

    • When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • FifoTopic – Set to true to create a FIFO topic.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

  • SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1.

  • TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.

The following attribute applies only to server-side encryption:

  • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.

The following attributes apply only to FIFO topics:

  • ArchivePolicy – Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic.

  • BeginningArchiveTime – The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy.

  • ContentBasedDeduplication – Enables content-based deduplication for FIFO topics.

    • By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.

    • When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.

" }, "Tags":{ "shape":"TagList", @@ -1339,7 +1341,7 @@ "members":{ "Attributes":{ "shape":"SubscriptionAttributesMap", - "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • ConfirmationWasAuthenticatedtrue if the subscription confirmation request was authenticated.

  • DeliveryPolicy – The JSON serialization of the subscription's delivery policy.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults.

  • FilterPolicy – The filter policy JSON that is assigned to the subscription. For more information, see Amazon SNS Message Filtering in the Amazon SNS Developer Guide.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • Owner – The Amazon Web Services account ID of the subscription's owner.

  • PendingConfirmationtrue if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token.

  • RawMessageDeliverytrue if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

  • SubscriptionArn – The subscription's ARN.

  • TopicArn – The topic ARN that the subscription is associated with.

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Kinesis Data Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

" + "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • ConfirmationWasAuthenticated – true if the subscription confirmation request was authenticated.

  • DeliveryPolicy – The JSON serialization of the subscription's delivery policy.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults.

  • FilterPolicy – The filter policy JSON that is assigned to the subscription. For more information, see Amazon SNS Message Filtering in the Amazon SNS Developer Guide.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • Owner – The Amazon Web Services account ID of the subscription's owner.

  • PendingConfirmation – true if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token.

  • RawMessageDelivery – true if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

  • SubscriptionArn – The subscription's ARN.

  • TopicArn – The topic ARN that the subscription is associated with.

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide.

" } }, "documentation":"

Response for GetSubscriptionAttributes action.

" @@ -2090,7 +2092,7 @@ }, "Subject":{ "shape":"subject", - "documentation":"

Optional parameter to be used as the \"Subject\" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints.

Constraints: Subjects must be ASCII text that begins with a letter, number, or punctuation mark; must not include line breaks or control characters; and must be less than 100 characters long.

" + "documentation":"

Optional parameter to be used as the \"Subject\" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints.

Constraints: Subjects must be UTF-8 text with no line breaks or control characters, and less than 100 characters long.

" }, "MessageStructure":{ "shape":"messageStructure", @@ -2287,7 +2289,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that this action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Kinesis Data Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that this action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide.

" }, "AttributeValue":{ "shape":"attributeValue", @@ -2353,7 +2355,7 @@ }, "Attributes":{ "shape":"SubscriptionAttributesMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the Subscribe action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Kinesis Data Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

The following attributes apply only to FIFO topics:

  • ReplayPolicy – Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic.

  • ReplayStatus – Retrieves the status of the subscription message replay, which can be one of the following:

    • Completed – The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy then the subscription will no longer receive newly published messages.

    • In progress – The replay is currently replaying the selected messages.

    • Failed – The replay was unable to complete.

    • Pending – The default state while the replay initiates.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the Subscribe action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types:

    • MessageAttributes (default) – The filter is applied on the message attributes.

    • MessageBody – The filter is applied on the message body.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

  • SubscriptionRoleArn – The ARN of the IAM role that has the following:

    • Permission to write to the Firehose delivery stream

    • Amazon SNS listed as a trusted entity

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide.

The following attributes apply only to FIFO topics:

  • ReplayPolicy – Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic.

  • ReplayStatus – Retrieves the status of the subscription message replay, which can be one of the following:

    • Completed – The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy then the subscription will no longer receive newly published messages.

    • In progress – The replay is currently replaying the selected messages.

    • Failed – The replay was unable to complete.

    • Pending – The default state while the replay initiates.

" }, "ReturnSubscriptionArn":{ "shape":"boolean", diff --git a/botocore/data/sqs/2012-11-05/service-2.json b/botocore/data/sqs/2012-11-05/service-2.json index 018749d7f3..f544283aca 100644 --- a/botocore/data/sqs/2012-11-05/service-2.json +++ b/botocore/data/sqs/2012-11-05/service-2.json @@ -7,6 +7,7 @@ "endpointPrefix":"sqs", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon SQS", "serviceFullName":"Amazon Simple Queue Service", "serviceId":"SQS", @@ -346,7 +347,7 @@ {"shape":"KmsInvalidKeyUsage"}, {"shape":"InvalidAddress"} ], - "documentation":"

Delivers a message to the specified queue.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

" + "documentation":"

Delivers a message to the specified queue.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

" }, "SendMessageBatch":{ "name":"SendMessageBatch", @@ -375,7 +376,7 @@ {"shape":"KmsInvalidKeyUsage"}, {"shape":"InvalidAddress"} ], - "documentation":"

You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

" + "documentation":"

You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

" }, "SetQueueAttributes":{ "name":"SetQueueAttributes", @@ -1598,7 +1599,7 @@ }, "MessageBody":{ "shape":"String", - "documentation":"

The message to send. The minimum size is one character. The maximum size is 256 KiB.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

" + "documentation":"

The message to send. The minimum size is one character. The maximum size is 256 KiB.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

" }, "DelaySeconds":{ "shape":"NullableInteger", diff --git a/botocore/data/ssm-quicksetup/2018-05-10/endpoint-rule-set-1.json b/botocore/data/ssm-quicksetup/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..31c96c7c39 --- /dev/null +++ b/botocore/data/ssm-quicksetup/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + 
"headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + 
"conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ssm-quicksetup.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json b/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..da5d6c97f7 --- /dev/null +++ b/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListConfigurationManagers": { + "input_token": "StartingToken", + "output_token": "NextToken", + "limit_key": "MaxItems", + "result_key": "ConfigurationManagersList" + } + } +} diff --git a/botocore/data/ssm-quicksetup/2018-05-10/service-2.json b/botocore/data/ssm-quicksetup/2018-05-10/service-2.json new file mode 100644 index 
0000000000..7d0677cdca --- /dev/null +++ b/botocore/data/ssm-quicksetup/2018-05-10/service-2.json @@ -0,0 +1,997 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"ssm-quicksetup", + "jsonVersion":"1.1", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS Systems Manager QuickSetup", + "serviceId":"SSM QuickSetup", + "signatureVersion":"v4", + "signingName":"ssm-quicksetup", + "uid":"ssm-quicksetup-2018-05-10", + "auth":["aws.auth#sigv4"] + }, + "operations":{ + "CreateConfigurationManager":{ + "name":"CreateConfigurationManager", + "http":{ + "method":"POST", + "requestUri":"/configurationManager", + "responseCode":200 + }, + "input":{"shape":"CreateConfigurationManagerInput"}, + "output":{"shape":"CreateConfigurationManagerOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates a Quick Setup configuration manager resource. This object is a collection of desired state configurations for multiple configuration definitions and summaries describing the deployments of those definitions.

" + }, + "DeleteConfigurationManager":{ + "name":"DeleteConfigurationManager", + "http":{ + "method":"DELETE", + "requestUri":"/configurationManager/{ManagerArn}", + "responseCode":200 + }, + "input":{"shape":"DeleteConfigurationManagerInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes a configuration manager.

", + "idempotent":true + }, + "GetConfigurationManager":{ + "name":"GetConfigurationManager", + "http":{ + "method":"GET", + "requestUri":"/configurationManager/{ManagerArn}", + "responseCode":200 + }, + "input":{"shape":"GetConfigurationManagerInput"}, + "output":{"shape":"GetConfigurationManagerOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a configuration manager.

" + }, + "GetServiceSettings":{ + "name":"GetServiceSettings", + "http":{ + "method":"GET", + "requestUri":"/serviceSettings", + "responseCode":200 + }, + "output":{"shape":"GetServiceSettingsOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns settings configured for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.

" + }, + "ListConfigurationManagers":{ + "name":"ListConfigurationManagers", + "http":{ + "method":"POST", + "requestUri":"/listConfigurationManagers", + "responseCode":200 + }, + "input":{"shape":"ListConfigurationManagersInput"}, + "output":{"shape":"ListConfigurationManagersOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns Quick Setup configuration managers.

" + }, + "ListQuickSetupTypes":{ + "name":"ListQuickSetupTypes", + "http":{ + "method":"GET", + "requestUri":"/listQuickSetupTypes", + "responseCode":200 + }, + "output":{"shape":"ListQuickSetupTypesOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the available Quick Setup types.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns tags assigned to the resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"PUT", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Assigns key-value pairs of metadata to Amazon Web Services resources.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes tags from the specified resource.

", + "idempotent":true + }, + "UpdateConfigurationDefinition":{ + "name":"UpdateConfigurationDefinition", + "http":{ + "method":"PUT", + "requestUri":"/configurationDefinition/{ManagerArn}/{Id}", + "responseCode":200 + }, + "input":{"shape":"UpdateConfigurationDefinitionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates a Quick Setup configuration definition.

", + "idempotent":true + }, + "UpdateConfigurationManager":{ + "name":"UpdateConfigurationManager", + "http":{ + "method":"PUT", + "requestUri":"/configurationManager/{ManagerArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateConfigurationManagerInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates a Quick Setup configuration manager.

", + "idempotent":true + }, + "UpdateServiceSettings":{ + "name":"UpdateServiceSettings", + "http":{ + "method":"PUT", + "requestUri":"/serviceSettings", + "responseCode":200 + }, + "input":{"shape":"UpdateServiceSettingsInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates settings configured for Quick Setup.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The requester has insufficient permissions to perform the operation.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ConfigurationDefinition":{ + "type":"structure", + "required":[ + "Parameters", + "Type" + ], + "members":{ + "Id":{ + "shape":"String", + "documentation":"

The ID of the configuration definition.

" + }, + "LocalDeploymentAdministrationRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role used to administrate local configuration deployments.

" + }, + "LocalDeploymentExecutionRoleName":{ + "shape":"ConfigurationDefinitionLocalDeploymentExecutionRoleNameString", + "documentation":"

The name of the IAM role used to deploy local configurations.

" + }, + "Parameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"

A list of key-value pairs containing the required parameters for the configuration type.

" + }, + "Type":{ + "shape":"ConfigurationDefinitionTypeString", + "documentation":"

The type of the Quick Setup configuration.

" + }, + "TypeVersion":{ + "shape":"ConfigurationDefinitionTypeVersionString", + "documentation":"

The version of the Quick Setup type used.

" + } + }, + "documentation":"

The definition of a Quick Setup configuration.

" + }, + "ConfigurationDefinitionInput":{ + "type":"structure", + "required":[ + "Parameters", + "Type" + ], + "members":{ + "LocalDeploymentAdministrationRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role used to administrate local configuration deployments.

" + }, + "LocalDeploymentExecutionRoleName":{ + "shape":"ConfigurationDefinitionInputLocalDeploymentExecutionRoleNameString", + "documentation":"

The name of the IAM role used to deploy local configurations.

" + }, + "Parameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"

The parameters for the configuration definition type. Parameters for configuration definitions vary based on the configuration type. The following tables outline the parameters for each configuration type.

OpsCenter (Type: Amazon Web ServicesQuickSetupType-SSMOpsCenter)
  • DelegatedAccountId

    • Description: (Required) The ID of the delegated administrator account.

  • TargetOrganizationalUnits

    • Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Resource Scheduler (Type: Amazon Web ServicesQuickSetupType-Scheduler)
  • TargetTagKey

    • Description: (Required) The tag key assigned to the instances you want to target.

  • TargetTagValue

    • Description: (Required) The value of the tag key assigned to the instances you want to target.

  • ICalendarString

    • Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Default Host Management Configuration (Type: Amazon Web ServicesQuickSetupType-DHMC)
  • UpdateSSMAgent

    • Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true\".

  • TargetOrganizationalUnits

    • Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Resource Explorer (Type: Amazon Web ServicesQuickSetupType-ResourceExplorer)
  • SelectedAggregatorRegion

    • Description: (Required) The Amazon Web Services Region where you want to create the aggregator index.

  • ReplaceExistingAggregator

    • Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the SelectedAggregatorRegion.

  • TargetOrganizationalUnits

    • Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Change Manager (Type: Amazon Web ServicesQuickSetupType-SSMChangeMgr)
  • DelegatedAccountId

    • Description: (Required) The ID of the delegated administrator account.

  • JobFunction

    • Description: (Required) The name for the Change Manager job function.

  • PermissionType

    • Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are CustomPermissions and AdminPermissions. The default value for the parameter is CustomPermissions.

  • CustomPermissions

    • Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify CustomPermissions for the PermissionType parameter.

  • TargetOrganizationalUnits

    • Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

DevOps Guru (Type: Amazon Web ServicesQuickSetupType-DevOpsGuru)
  • AnalyseAllResources

    • Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all CloudFormation stacks in the account. The default value is \"false\".

  • EnableSnsNotifications

    • Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \"true\".

  • EnableSsmOpsItems

    • Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \"true\".

  • EnableDriftRemediation

    • Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \"false\".

  • RemediationSchedule

    • Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(1 days), and none. The default value is \"none\".

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Conformance Packs (Type: Amazon Web ServicesQuickSetupType-CFGCPacks)
  • DelegatedAccountId

    • Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.

  • RemediationSchedule

    • Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(2 days), and none. The default value is \"none\".

  • CPackNames

    • Description: (Required) A comma separated list of Config conformance packs.

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Config Recording (Type: Amazon Web ServicesQuickSetupType-CFGRecording)
  • RecordAllResources

    • Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \"true\".

  • ResourceTypesToRecord

    • Description: (Optional) A comma separated list of resource types you want to record.

  • RecordGlobalResourceTypes

    • Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \"false\".

  • GlobalResourceTypesRegion

    • Description: (Optional) Determines the Amazon Web Services Region where global resources are recorded.

  • UseCustomBucket

    • Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \"false\".

  • DeliveryBucketName

    • Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver configuration snapshots and configuration history files to.

  • DeliveryBucketPrefix

    • Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.

  • NotificationOptions

    • Description: (Optional) Determines the notification configuration for the recorder. The valid values are NoStreaming, UseExistingTopic, and CreateTopic. The default value is NoStreaming.

  • CustomDeliveryTopicAccountId

    • Description: (Optional) The ID of the Amazon Web Services account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the UseExistingTopic notification option.

  • CustomDeliveryTopicName

    • Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the UseExistingTopic notification option.

  • RemediationSchedule

    • Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(7 days), rate(1 days), and none. The default value is \"none\".

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Host Management (Type: Amazon Web ServicesQuickSetupType-SSMHostMgmt)
  • UpdateSSMAgent

    • Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true\".

  • UpdateEc2LaunchAgent

    • Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \"false\".

  • CollectInventory

    • Description: (Optional) A boolean value that determines whether inventory is collected from the target instances. The default value is \"true\".

  • ScanInstances

    • Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \"true\".

  • InstallCloudWatchAgent

    • Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \"false\".

  • UpdateCloudWatchAgent

    • Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \"false\".

  • IsPolicyAttachAllowed

    • Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false\".

  • TargetType

    • Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. Use * to target all instances in the account.

  • TargetInstances

    • Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter.

  • TargetTagKey

    • Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • TargetTagValue

    • Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • ResourceGroupName

    • Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter.

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Distributor (Type: Amazon Web ServicesQuickSetupType-Distributor)
  • PackagesToInstall

    • Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are AWSEFSTools, AWSCWAgent, and AWSEC2LaunchAgent.

  • RemediationSchedule

    • Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days), rate(14 days), rate(2 days), and none. The default value is \"rate(30 days)\".

  • IsPolicyAttachAllowed

    • Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false\".

  • TargetType

    • Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. Use * to target all instances in the account.

  • TargetInstances

    • Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter.

  • TargetTagKey

    • Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • TargetTagValue

    • Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • ResourceGroupName

    • Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter.

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

Patch Policy (Type: Amazon Web ServicesQuickSetupType-PatchPolicy)
  • PatchPolicyName

    • Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.

  • SelectedPatchBaselines

    • Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.

  • PatchBaselineUseDefault

    • Description: (Optional) A boolean value that determines whether the selected patch baselines are all Amazon Web Services provided.

  • ConfigurationOptionsPatchOperation

    • Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are Scan and ScanAndInstall. The default value for the parameter is Scan.

  • ConfigurationOptionsScanValue

    • Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.

  • ConfigurationOptionsInstallValue

    • Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.

  • ConfigurationOptionsScanNextInterval

    • Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \"false\".

  • ConfigurationOptionsInstallNextInterval

    • Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \"false\".

  • RebootOption

    • Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \"false\".

  • IsPolicyAttachAllowed

    • Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false\".

  • OutputLogEnableS3

    • Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.

  • OutputS3Location

    • Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.

      • OutputS3BucketRegion

        • Description: (Optional) The Amazon Web Services Region where the Amazon S3 bucket you want Config to deliver command output to is located.

      • OutputS3BucketName

        • Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver command output to.

      • OutputS3KeyPrefix

        • Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.

  • TargetType

    • Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *, InstanceIds, ResourceGroups, and Tags. Use * to target all instances in the account.

  • TargetInstances

    • Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds for the TargetType parameter.

  • TargetTagKey

    • Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • TargetTagValue

    • Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags for the TargetType parameter.

  • ResourceGroupName

    • Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups for the TargetType parameter.

  • TargetAccounts

    • Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts or TargetOrganizationalUnits.

  • TargetOrganizationalUnits

    • Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.

  • TargetRegions

    • Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.

" + }, + "Type":{ + "shape":"ConfigurationDefinitionInputTypeString", + "documentation":"

The type of the Quick Setup configuration.

" + }, + "TypeVersion":{ + "shape":"ConfigurationDefinitionInputTypeVersionString", + "documentation":"

The version of the Quick Setup type to use.

" + } + }, + "documentation":"

Defines the preferences and options for a configuration definition.

" + }, + "ConfigurationDefinitionInputLocalDeploymentExecutionRoleNameString":{ + "type":"string", + "pattern":"^[\\w+=,.@-]{1,64}$" + }, + "ConfigurationDefinitionInputTypeString":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.:/]{3,200}$" + }, + "ConfigurationDefinitionInputTypeVersionString":{ + "type":"string", + "max":128, + "min":1 + }, + "ConfigurationDefinitionLocalDeploymentExecutionRoleNameString":{ + "type":"string", + "pattern":"^[\\w+=,.@-]{1,64}$" + }, + "ConfigurationDefinitionSummariesList":{ + "type":"list", + "member":{"shape":"ConfigurationDefinitionSummary"} + }, + "ConfigurationDefinitionSummary":{ + "type":"structure", + "members":{ + "FirstClassParameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"

The common parameters and values for the configuration definition.

" + }, + "Id":{ + "shape":"String", + "documentation":"

The ID of the configuration definition.

" + }, + "Type":{ + "shape":"String", + "documentation":"

The type of the Quick Setup configuration used by the configuration definition.

" + }, + "TypeVersion":{ + "shape":"String", + "documentation":"

The version of the Quick Setup type used by the configuration definition.

" + } + }, + "documentation":"

A summarized definition of a Quick Setup configuration definition.

" + }, + "ConfigurationDefinitionTypeString":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.:/]{3,200}$" + }, + "ConfigurationDefinitionTypeVersionString":{ + "type":"string", + "max":128, + "min":1 + }, + "ConfigurationDefinitionsInputList":{ + "type":"list", + "member":{"shape":"ConfigurationDefinitionInput"} + }, + "ConfigurationDefinitionsList":{ + "type":"list", + "member":{"shape":"ConfigurationDefinition"} + }, + "ConfigurationManagerList":{ + "type":"list", + "member":{"shape":"ConfigurationManagerSummary"} + }, + "ConfigurationManagerSummary":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "ConfigurationDefinitionSummaries":{ + "shape":"ConfigurationDefinitionSummariesList", + "documentation":"

A summary of the Quick Setup configuration definition.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description of the configuration.

" + }, + "ManagerArn":{ + "shape":"String", + "documentation":"

The ARN of the Quick Setup configuration.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the configuration.

" + }, + "StatusSummaries":{ + "shape":"StatusSummariesList", + "documentation":"

Summaries of the state of the configuration manager. These summaries include an aggregate of the statuses from the configuration definition associated with the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more.

" + } + }, + "documentation":"

A summary of a Quick Setup configuration manager.

" + }, + "ConfigurationParametersMap":{ + "type":"map", + "key":{"shape":"ConfigurationParametersMapKeyString"}, + "value":{"shape":"ConfigurationParametersMapValueString"} + }, + "ConfigurationParametersMapKeyString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[A-Za-z0-9+=@_\\/\\s-]+$" + }, + "ConfigurationParametersMapValueString":{ + "type":"string", + "max":40960, + "min":0 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Another request is being processed. Wait a few minutes and try again.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateConfigurationManagerInput":{ + "type":"structure", + "required":["ConfigurationDefinitions"], + "members":{ + "ConfigurationDefinitions":{ + "shape":"ConfigurationDefinitionsInputList", + "documentation":"

The definition of the Quick Setup configuration that the configuration manager deploys.

" + }, + "Description":{ + "shape":"CreateConfigurationManagerInputDescriptionString", + "documentation":"

A description of the configuration manager.

" + }, + "Name":{ + "shape":"CreateConfigurationManagerInputNameString", + "documentation":"

A name for the configuration manager.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Key-value pairs of metadata to assign to the configuration manager.

" + } + } + }, + "CreateConfigurationManagerInputDescriptionString":{ + "type":"string", + "pattern":"^.{0,512}$" + }, + "CreateConfigurationManagerInputNameString":{ + "type":"string", + "pattern":"^[ A-Za-z0-9._-]{0,120}$" + }, + "CreateConfigurationManagerOutput":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "ManagerArn":{ + "shape":"String", + "documentation":"

The ARN for the newly created configuration manager.

" + } + } + }, + "DeleteConfigurationManagerInput":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "ManagerArn":{ + "shape":"DeleteConfigurationManagerInputManagerArnString", + "documentation":"

The ID of the configuration manager.

", + "location":"uri", + "locationName":"ManagerArn" + } + } + }, + "DeleteConfigurationManagerInputManagerArnString":{ + "type":"string", + "pattern":"^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$" + }, + "Filter":{ + "type":"structure", + "required":[ + "Key", + "Values" + ], + "members":{ + "Key":{ + "shape":"FilterKeyString", + "documentation":"

The key for the filter.

" + }, + "Values":{ + "shape":"FilterValues", + "documentation":"

The values for the filter keys.

" + } + }, + "documentation":"

A key-value pair to filter results.

" + }, + "FilterKeyString":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^[A-Za-z0-9+=@_\\/\\s-]*$" + }, + "FilterValues":{ + "type":"list", + "member":{"shape":"FilterValuesMemberString"} + }, + "FilterValuesMemberString":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[A-Za-z0-9+=@_\\/\\s-]*$" + }, + "FiltersList":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "GetConfigurationManagerInput":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "ManagerArn":{ + "shape":"GetConfigurationManagerInputManagerArnString", + "documentation":"

The ARN of the configuration manager.

", + "location":"uri", + "locationName":"ManagerArn" + } + } + }, + "GetConfigurationManagerInputManagerArnString":{ + "type":"string", + "min":1, + "pattern":"^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$" + }, + "GetConfigurationManagerOutput":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "ConfigurationDefinitions":{ + "shape":"ConfigurationDefinitionsList", + "documentation":"

The configuration definitions associated with the configuration manager.

" + }, + "CreatedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The datetime stamp when the configuration manager was created.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description of the configuration manager.

" + }, + "LastModifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The datetime stamp when the configuration manager was last updated.

" + }, + "ManagerArn":{ + "shape":"String", + "documentation":"

The ARN of the configuration manager.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the configuration manager.

" + }, + "StatusSummaries":{ + "shape":"StatusSummariesList", + "documentation":"

A summary of the state of the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Key-value pairs of metadata to assign to the configuration manager.

" + } + } + }, + "GetServiceSettingsOutput":{ + "type":"structure", + "members":{ + "ServiceSettings":{ + "shape":"ServiceSettings", + "documentation":"

Returns details about the settings for Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.

" + } + } + }, + "IAMRoleArn":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

An error occurred on the server side.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListConfigurationManagersInput":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FiltersList", + "documentation":"

Filters the results returned by the request.

" + }, + "MaxItems":{ + "shape":"ListConfigurationManagersInputMaxItemsInteger", + "documentation":"

Specifies the maximum number of configuration managers that are returned by the request.

" + }, + "StartingToken":{ + "shape":"ListConfigurationManagersInputStartingTokenString", + "documentation":"

The token to use when requesting a specific set of items from a list.

" + } + } + }, + "ListConfigurationManagersInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListConfigurationManagersInputStartingTokenString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^[A-Za-z0-9+=@_\\/\\s-]*$" + }, + "ListConfigurationManagersOutput":{ + "type":"structure", + "members":{ + "ConfigurationManagersList":{ + "shape":"ConfigurationManagerList", + "documentation":"

The configuration managers returned by the request.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use when requesting the next set of configuration managers. If there are no additional operations to return, the string is empty.

" + } + } + }, + "ListQuickSetupTypesOutput":{ + "type":"structure", + "members":{ + "QuickSetupTypeList":{ + "shape":"QuickSetupTypeList", + "documentation":"

An array of Quick Setup types.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The ARN of the resource the tag is assigned to.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs of metadata assigned to the resource.

" + } + } + }, + "QuickSetupTypeList":{ + "type":"list", + "member":{"shape":"QuickSetupTypeOutput"} + }, + "QuickSetupTypeOutput":{ + "type":"structure", + "members":{ + "LatestVersion":{ + "shape":"String", + "documentation":"

The latest version number of the configuration.

" + }, + "Type":{ + "shape":"String", + "documentation":"

The type of the Quick Setup configuration.

" + } + }, + "documentation":"

Information about the Quick Setup type.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The resource couldn't be found. Check the ID or name and try again.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ServiceSettings":{ + "type":"structure", + "members":{ + "ExplorerEnablingRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The IAM role used to enable Explorer.

" + } + }, + "documentation":"

Settings configured for Quick Setup.

" + }, + "Status":{ + "type":"string", + "enum":[ + "INITIALIZING", + "DEPLOYING", + "SUCCEEDED", + "DELETING", + "STOPPING", + "FAILED", + "STOPPED", + "DELETE_FAILED", + "STOP_FAILED", + "NONE" + ] + }, + "StatusDetails":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "StatusSummariesList":{ + "type":"list", + "member":{"shape":"StatusSummary"} + }, + "StatusSummary":{ + "type":"structure", + "required":[ + "LastUpdatedAt", + "StatusType" + ], + "members":{ + "LastUpdatedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The datetime stamp when the status was last updated.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The current status.

" + }, + "StatusDetails":{ + "shape":"StatusDetails", + "documentation":"

Details about the status.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

When applicable, returns an informational message relevant to the current status and status type of the status summary object. We don't recommend implementing parsing logic around this value since the messages returned can vary in format.

" + }, + "StatusType":{ + "shape":"StatusType", + "documentation":"

The type of a status summary.

" + } + }, + "documentation":"

A summarized description of the status.

" + }, + "StatusType":{ + "type":"string", + "enum":[ + "Deployment", + "AsyncExecutions" + ] + }, + "String":{"type":"string"}, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TagEntry":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagEntryKeyString", + "documentation":"

The key for the tag.

" + }, + "Value":{ + "shape":"TagEntryValueString", + "documentation":"

The value for the tag.

" + } + }, + "documentation":"

Key-value pairs of metadata.

", + "sensitive":true + }, + "TagEntryKeyString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9 _=@:.+-/]+$" + }, + "TagEntryValueString":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[A-Za-z0-9 _=@:.+-/]+$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The ARN of the resource to tag.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Key-value pairs of metadata to assign to the resource.

" + } + } + }, + "Tags":{ + "type":"list", + "member":{"shape":"TagEntry"}, + "sensitive":true + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"TagsMapKeyString"}, + "value":{"shape":"TagsMapValueString"}, + "sensitive":true + }, + "TagsMapKeyString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9 _=@:.+-/]+$" + }, + "TagsMapValueString":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[A-Za-z0-9 _=@:.+-/]+$" + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request or operation exceeds the maximum allowed request rate per Amazon Web Services account and Amazon Web Services Region.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The ARN of the resource to remove tags from.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

The keys of the tags to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UpdateConfigurationDefinitionInput":{ + "type":"structure", + "required":[ + "Id", + "ManagerArn" + ], + "members":{ + "Id":{ + "shape":"UpdateConfigurationDefinitionInputIdString", + "documentation":"

The ID of the configuration definition you want to update.

", + "location":"uri", + "locationName":"Id" + }, + "LocalDeploymentAdministrationRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role used to administrate local configuration deployments.

" + }, + "LocalDeploymentExecutionRoleName":{ + "shape":"UpdateConfigurationDefinitionInputLocalDeploymentExecutionRoleNameString", + "documentation":"

The name of the IAM role used to deploy local configurations.

" + }, + "ManagerArn":{ + "shape":"UpdateConfigurationDefinitionInputManagerArnString", + "documentation":"

The ARN of the configuration manager associated with the definition to update.

", + "location":"uri", + "locationName":"ManagerArn" + }, + "Parameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"

The parameters for the configuration definition type.

" + }, + "TypeVersion":{ + "shape":"UpdateConfigurationDefinitionInputTypeVersionString", + "documentation":"

The version of the Quick Setup type to use.

" + } + } + }, + "UpdateConfigurationDefinitionInputIdString":{ + "type":"string", + "pattern":"^[a-z0-9-]{1,20}$" + }, + "UpdateConfigurationDefinitionInputLocalDeploymentExecutionRoleNameString":{ + "type":"string", + "pattern":"^[\\w+=,.@-]{1,64}$" + }, + "UpdateConfigurationDefinitionInputManagerArnString":{ + "type":"string", + "pattern":"^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$" + }, + "UpdateConfigurationDefinitionInputTypeVersionString":{ + "type":"string", + "pattern":"^\\d{1,3}(\\.\\d{1,3})?$|^LATEST$" + }, + "UpdateConfigurationManagerInput":{ + "type":"structure", + "required":["ManagerArn"], + "members":{ + "Description":{ + "shape":"UpdateConfigurationManagerInputDescriptionString", + "documentation":"

A description of the configuration manager.

" + }, + "ManagerArn":{ + "shape":"UpdateConfigurationManagerInputManagerArnString", + "documentation":"

The ARN of the configuration manager.

", + "location":"uri", + "locationName":"ManagerArn" + }, + "Name":{ + "shape":"UpdateConfigurationManagerInputNameString", + "documentation":"

A name for the configuration manager.

" + } + } + }, + "UpdateConfigurationManagerInputDescriptionString":{ + "type":"string", + "pattern":"^.{0,512}$" + }, + "UpdateConfigurationManagerInputManagerArnString":{ + "type":"string", + "pattern":"^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$" + }, + "UpdateConfigurationManagerInputNameString":{ + "type":"string", + "pattern":"^[ A-Za-z0-9._-]{0,120}$" + }, + "UpdateServiceSettingsInput":{ + "type":"structure", + "members":{ + "ExplorerEnablingRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The IAM role used to enable Explorer.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request is invalid. Verify the values provided for the request parameters are accurate.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

Quick Setup helps you quickly configure frequently used services and features with recommended best practices. Quick Setup simplifies setting up services, including Systems Manager, by automating common or recommended tasks.

" +} diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 7746ccf6ff..c0e9d5c26b 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -11,7 +11,8 @@ "serviceId":"SSM", "signatureVersion":"v4", "targetPrefix":"AmazonSSM", - "uid":"ssm-2014-11-06" + "uid":"ssm-2014-11-06", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddTagsToResource":{ @@ -681,7 +682,7 @@ {"shape":"InvalidInstanceInformationFilterValue"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated.

If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error.

The IamRole field returned for this API operation is the Identity and Access Management (IAM) role assigned to on-premises managed nodes. This operation does not return the IAM role for EC2 instances.

" + "documentation":"

Provides information about one or more of your managed nodes, including the operating system platform, SSM Agent version, association status, and IP address. This operation does not return information for nodes that are either Stopped or Terminated.

If you specify one or more node IDs, the operation returns information for those managed nodes. If you don't specify node IDs, it returns information for all your managed nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive an error.

The IamRole field returned for this API operation is the role assigned to an Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or the role assigned to an on-premises managed node.

" }, "DescribeInstancePatchStates":{ "name":"DescribeInstancePatchStates", @@ -952,7 +953,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

The following section lists the properties that can be used in filters for each major operating system type:

AMAZON_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

AMAZON_LINUX_2

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

CENTOS

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

DEBIAN

Valid properties: PRODUCT | PRIORITY

MACOS

Valid properties: PRODUCT | CLASSIFICATION

ORACLE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

REDHAT_ENTERPRISE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

SUSE

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

UBUNTU

Valid properties: PRODUCT | PRIORITY

WINDOWS

Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY

" + "documentation":"

Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

The following section lists the properties that can be used in filters for each major operating system type:

AMAZON_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

AMAZON_LINUX_2

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

AMAZON_LINUX_2023

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

CENTOS

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

DEBIAN

Valid properties: PRODUCT | PRIORITY

MACOS

Valid properties: PRODUCT | CLASSIFICATION

ORACLE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

REDHAT_ENTERPRISE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

SUSE

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

UBUNTU

Valid properties: PRODUCT | PRIORITY

WINDOWS

Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY

" }, "DescribeSessions":{ "name":"DescribeSessions", @@ -1031,7 +1032,7 @@ {"shape":"InvalidPluginName"}, {"shape":"InvocationDoesNotExist"} ], - "documentation":"

Returns detailed information about command execution for an invocation or plugin.

GetCommandInvocation only gives the execution status of a plugin in a document. To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes, use ListCommands.

" + "documentation":"

Returns detailed information about command execution for an invocation or plugin. The Run Command API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command.

GetCommandInvocation only gives the execution status of a plugin in a document. To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes, use ListCommands.

" }, "GetConnectionStatus":{ "name":"GetConnectionStatus", @@ -5015,7 +5016,7 @@ }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK: Packages in the Rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the Rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as InstalledRejected.

" + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

ALLOW_AS_DEPENDENCY

Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified.

Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified.

BLOCK

All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED.

" }, "Description":{ "shape":"BaselineDescription", @@ -8173,7 +8174,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide.

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -8675,7 +8676,8 @@ "IPAddress":{ "type":"string", "max":46, - "min":1 + "min":1, + "sensitive":true }, "ISO8601String":{"type":"string"}, "IamRole":{ @@ -8892,7 +8894,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" + "documentation":"

The role assigned to an Amazon EC2 instance configured with a Systems Manager Quick Setup host management configuration or the role assigned to an on-premises managed node.

This call doesn't return the IAM role for unmanaged Amazon EC2 instances (instances not configured for Systems Manager). To retrieve the role for an unmanaged instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" }, "RegistrationDate":{ "shape":"DateTime", @@ -9224,7 +9226,7 @@ }, "Architecture":{ "shape":"Architecture", - "documentation":"

The CPU architecture of the node. For example, x86_64.

" + "documentation":"

The CPU architecture of the node. For example, x86_64.

" }, "IPAddress":{ "shape":"IPAddress", @@ -11353,7 +11355,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide.

" }, "TimeoutSeconds":{ "shape":"TimeoutSeconds", @@ -11478,7 +11480,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", @@ -13385,7 +13387,7 @@ }, "DefaultBaseline":{ "shape":"DefaultBaseline", - "documentation":"

Whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" + "documentation":"

Indicates whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" } }, "documentation":"

Defines the basic information about a patch baseline.

" @@ -13703,12 +13705,12 @@ }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", - "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. Not supported on Debian Server or Ubuntu Server.

", + "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.

This parameter is marked as not required, but your request must include a value for either ApproveAfterDays or ApproveUntilDate.

Not supported for Debian Server or Ubuntu Server.

", "box":true }, "ApproveUntilDate":{ "shape":"PatchStringDateTime", - "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2021-12-31.

", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.

Enter dates in the format YYYY-MM-DD. For example, 2021-12-31.

This parameter is marked as not required, but your request must include a value for either ApproveUntilDate or ApproveAfterDays.

Not supported for Debian Server or Ubuntu Server.

", "box":true }, "EnableNonSecurity":{ @@ -16708,7 +16710,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the Amazon Web Services Systems Manager User Guide.

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -16916,7 +16918,7 @@ }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK: Packages in the Rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the Rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as InstalledRejected.

" + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

ALLOW_AS_DEPENDENCY

Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified.

Windows Server: Windows Server doesn't support the concept of package dependencies. If a package is in the rejected patches list and already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified.

BLOCK

All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED.

" }, "Description":{ "shape":"BaselineDescription", diff --git a/botocore/data/stepfunctions/2016-11-23/service-2.json b/botocore/data/stepfunctions/2016-11-23/service-2.json index cf26dad030..9462c7dfec 100644 --- a/botocore/data/stepfunctions/2016-11-23/service-2.json +++ b/botocore/data/stepfunctions/2016-11-23/service-2.json @@ -11,7 +11,8 @@ "serviceId":"SFN", "signatureVersion":"v4", "targetPrefix":"AWSStepFunctions", - "uid":"states-2016-11-23" + "uid":"states-2016-11-23", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateActivity":{ @@ -24,8 +25,12 @@ "output":{"shape":"CreateActivityOutput"}, "errors":[ {"shape":"ActivityLimitExceeded"}, + {"shape":"ActivityAlreadyExists"}, {"shape":"InvalidName"}, - {"shape":"TooManyTags"} + {"shape":"TooManyTags"}, + {"shape":"InvalidEncryptionConfiguration"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateActivity is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateActivity's idempotency check is based on the activity name. If a following request has different tags values, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, tags will not be updated, even if they are different.

", "idempotent":true @@ -50,9 +55,12 @@ {"shape":"StateMachineTypeNotSupported"}, {"shape":"TooManyTags"}, {"shape":"ValidationException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"InvalidEncryptionConfiguration"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsThrottlingException"} ], - "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide.

If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, and TracingConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

", + "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide.

If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine.

For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine. The execution history and state machine definition will be encrypted with the key applied to the State Machine.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, TracingConfiguration, and EncryptionConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

", "idempotent":true }, "CreateStateMachineAlias":{ @@ -156,7 +164,10 @@ "output":{"shape":"DescribeExecutionOutput"}, "errors":[ {"shape":"ExecutionDoesNotExist"}, - {"shape":"InvalidArn"} + {"shape":"InvalidArn"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Provides information about a state machine execution, such as the state machine associated with the execution, the execution input and output, and relevant execution metadata. If you've redriven an execution, you can use this API action to return information about the redrives of that execution. In addition, you can use this API action to return the Map Run Amazon Resource Name (ARN) if the execution was dispatched by a Map Run.

If you specify a version or alias ARN when you call the StartExecution API action, DescribeExecution returns that ARN.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

Executions of an EXPRESS state machine aren't supported by DescribeExecution unless a Map Run dispatched them.

" }, @@ -184,7 +195,10 @@ "output":{"shape":"DescribeStateMachineOutput"}, "errors":[ {"shape":"InvalidArn"}, - {"shape":"StateMachineDoesNotExist"} + {"shape":"StateMachineDoesNotExist"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Provides information about a state machine's definition, its IAM role Amazon Resource Name (ARN), and configuration.

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

The following are some examples of qualified and unqualified state machine ARNs:

  • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

  • The following qualified state machine ARN refers to an alias named PROD.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

  • The following unqualified state machine ARN refers to a state machine named myStateMachine.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

This API action returns the details for a state machine version if the stateMachineArn you specify is a state machine version ARN.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

" }, @@ -213,7 +227,10 @@ "output":{"shape":"DescribeStateMachineForExecutionOutput"}, "errors":[ {"shape":"ExecutionDoesNotExist"}, - {"shape":"InvalidArn"} + {"shape":"InvalidArn"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Provides information about a state machine's definition, its execution role ARN, and configuration. If a Map Run dispatched the execution, this action returns the Map Run Amazon Resource Name (ARN) in the response. The state machine returned is the state machine associated with the Map Run.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

This API action is not supported by EXPRESS state machines.

" }, @@ -228,7 +245,10 @@ "errors":[ {"shape":"ActivityDoesNotExist"}, {"shape":"ActivityWorkerLimitExceeded"}, - {"shape":"InvalidArn"} + {"shape":"InvalidArn"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Used by workers to retrieve a task (with the specified activity ARN) which has been scheduled for execution by a running state machine. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available (i.e. an execution of a task of this type is needed.) The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns a taskToken with a null string.

This API action isn't logged in CloudTrail.

Workers should set their client side socket timeout to at least 65 seconds (5 seconds higher than the maximum time the service may hold the poll request).

Polling with GetActivityTask can cause latency in some implementations. See Avoid Latency When Polling for Activity Tasks in the Step Functions Developer Guide.

" }, @@ -243,7 +263,10 @@ "errors":[ {"shape":"ExecutionDoesNotExist"}, {"shape":"InvalidArn"}, - {"shape":"InvalidToken"} + {"shape":"InvalidToken"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Returns the history of the specified execution as a list of events. By default, the results are returned in ascending order of the timeStamp of the events. Use the reverseOrder parameter to get the latest events first.

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

This API action is not supported by EXPRESS state machines.

" }, @@ -400,9 +423,12 @@ "errors":[ {"shape":"TaskDoesNotExist"}, {"shape":"InvalidToken"}, - {"shape":"TaskTimedOut"} + {"shape":"TaskTimedOut"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], - "documentation":"

Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed.

" + "documentation":"

Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken failed.

For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.

A caller can mark a task as fail without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted.

" }, "SendTaskHeartbeat":{ "name":"SendTaskHeartbeat", @@ -431,7 +457,10 @@ {"shape":"TaskDoesNotExist"}, {"shape":"InvalidOutput"}, {"shape":"InvalidToken"}, - {"shape":"TaskTimedOut"} + {"shape":"TaskTimedOut"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Used by activity workers, Task states using the callback pattern, and optionally Task states using the job run pattern to report that the task identified by the taskToken completed successfully.

" }, @@ -451,7 +480,10 @@ {"shape":"InvalidName"}, {"shape":"StateMachineDoesNotExist"}, {"shape":"StateMachineDeleting"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Starts a state machine execution.

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

The following are some examples of qualified and unqualified state machine ARNs:

  • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

  • The following qualified state machine ARN refers to an alias named PROD.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

  • The following unqualified state machine ARN refers to a state machine named myStateMachine.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

If you start an execution with an unqualified state machine ARN, Step Functions uses the latest revision of the state machine for the execution.

To start executions of a state machine version, call StartExecution and provide the version ARN or the ARN of an alias that points to the version.

StartExecution is idempotent for STANDARD workflows. For a STANDARD workflow, if you call StartExecution with the same name and input as a running execution, the call succeeds and returns the same response as the original request. If the execution is closed or if the input is different, it returns a 400 ExecutionAlreadyExists error. You can reuse names after 90 days.

StartExecution isn't idempotent for EXPRESS workflows.

", "idempotent":true @@ -470,7 +502,10 @@ {"shape":"InvalidName"}, {"shape":"StateMachineDoesNotExist"}, {"shape":"StateMachineDeleting"}, - {"shape":"StateMachineTypeNotSupported"} + {"shape":"StateMachineTypeNotSupported"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], "documentation":"

Starts a Synchronous Express state machine execution. StartSyncExecution is not available for STANDARD workflows.

StartSyncExecution will return a 200 OK response, even if your execution fails, because the status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your execution from running, such as permissions errors, limit errors, or issues with your state machine code and configuration.

This API action isn't logged in CloudTrail.

", "endpoint":{"hostPrefix":"sync-"} @@ -486,9 +521,12 @@ "errors":[ {"shape":"ExecutionDoesNotExist"}, {"shape":"InvalidArn"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsInvalidStateException"}, + {"shape":"KmsThrottlingException"} ], - "documentation":"

Stops an execution.

This API action is not supported by EXPRESS state machines.

" + "documentation":"

Stops an execution.

This API action is not supported by EXPRESS state machines.

For an execution with encryption enabled, Step Functions will encrypt the error and cause fields using the KMS key for the execution role.

A caller can stop an execution without using any KMS permissions in the execution role if the caller provides a null value for both error and cause fields because no data needs to be encrypted.

" }, "TagResource":{ "name":"TagResource", @@ -569,9 +607,12 @@ {"shape":"StateMachineDoesNotExist"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"InvalidEncryptionConfiguration"}, + {"shape":"KmsAccessDeniedException"}, + {"shape":"KmsThrottlingException"} ], - "documentation":"

Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error.

A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

The following are some examples of qualified and unqualified state machine ARNs:

  • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

  • The following qualified state machine ARN refers to an alias named PROD.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

  • The following unqualified state machine ARN refers to a state machine named myStateMachine.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine.

Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.

All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn.

", + "documentation":"

Updates an existing state machine by modifying its definition, roleArn, loggingConfiguration, or EncryptionConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error.

A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

The following are some examples of qualified and unqualified state machine ARNs:

  • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

    arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

    If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

  • The following qualified state machine ARN refers to an alias named PROD.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

    If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

  • The following unqualified state machine ARN refers to a state machine named myStateMachine.

    arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine.

Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.

All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn.

", "idempotent":true }, "UpdateStateMachineAlias":{ @@ -606,6 +647,14 @@ } }, "shapes":{ + "ActivityAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Activity already exists. EncryptionConfiguration may not be updated.

", + "exception":true + }, "ActivityDoesNotExist":{ "type":"structure", "members":{ @@ -841,6 +890,10 @@ "tags":{ "shape":"TagList", "documentation":"

The list of tags to add to a resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings to configure server-side encryption.

" } } }, @@ -942,6 +995,10 @@ "versionDescription":{ "shape":"VersionDescription", "documentation":"

Sets description about the state machine version. You can only set the description if the publish parameter is set to true. Otherwise, if you set versionDescription, but publish to false, this API action throws ValidationException.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings to configure server-side encryption.

" } } }, @@ -1061,6 +1118,10 @@ "creationDate":{ "shape":"Timestamp", "documentation":"

The date the activity is created.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings for configured server-side encryption.

" } } }, @@ -1071,6 +1132,10 @@ "executionArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the execution to describe.

" + }, + "includedData":{ + "shape":"IncludedData", + "documentation":"

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" } } }, @@ -1279,6 +1344,10 @@ "executionArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the execution you want state machine information for.

" + }, + "includedData":{ + "shape":"IncludedData", + "documentation":"

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" } } }, @@ -1328,6 +1397,10 @@ "revisionId":{ "shape":"RevisionId", "documentation":"

The revision identifier for the state machine. The first revision ID when you create the state machine is null.

Use the state machine revisionId parameter to compare the revision of a state machine with the configuration of the state machine used for executions without performing a diff of the properties, such as definition and roleArn.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings to configure server-side encryption.

" } } }, @@ -1338,6 +1411,10 @@ "stateMachineArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the state machine for which you want the information.

If you specify a state machine version ARN, this API returns details about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

" + }, + "includedData":{ + "shape":"IncludedData", + "documentation":"

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

When calling a labelled ARN for an encrypted state machine, the includedData = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire state machine definition to get the Distributed Map state’s definition. In this case, the API caller needs to have kms:Decrypt permission.

" } } }, @@ -1366,7 +1443,7 @@ }, "definition":{ "shape":"Definition", - "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" + "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

If called with includedData = METADATA_ONLY, the returned definition will be {}.

" }, "roleArn":{ "shape":"Arn", @@ -1396,10 +1473,41 @@ "description":{ "shape":"VersionDescription", "documentation":"

The description of the state machine version.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings to configure server-side encryption.

" } } }, "Enabled":{"type":"boolean"}, + "EncryptionConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

An alias, alias ARN, key ID, or key ARN of a symmetric encryption KMS key to encrypt data. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

" + }, + "kmsDataKeyReusePeriodSeconds":{ + "shape":"KmsDataKeyReusePeriodSeconds", + "documentation":"

Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call GenerateDataKey. Only applies to customer managed keys.

", + "box":true + }, + "type":{ + "shape":"EncryptionType", + "documentation":"

Encryption type.

" + } + }, + "documentation":"

Settings to configure server-side encryption.

For additional control over security, you can encrypt your data using a customer-managed key for Step Functions state machines and activities. You can configure a symmetric KMS key and data key reuse period when creating or updating a State Machine, and when creating an Activity. The execution history and state machine definition will be encrypted with the key applied to the State Machine. Activity inputs will be encrypted with the key applied to the Activity.

Step Functions automatically enables encryption at rest using Amazon Web Services owned keys at no charge. However, KMS charges apply when using a customer managed key. For more information about pricing, see Key Management Service pricing.

For more information on KMS, see What is Key Management Service?

" + }, + "EncryptionType":{ + "type":"string", + "enum":[ + "AWS_OWNED_KEY", + "CUSTOMER_MANAGED_KMS_KEY" + ] + }, "ErrorMessage":{"type":"string"}, "EventId":{"type":"long"}, "ExecutionAbortedEventDetails":{ @@ -1905,6 +2013,13 @@ "type":"boolean", "box":true }, + "IncludedData":{ + "type":"string", + "enum":[ + "ALL_DATA", + "METADATA_ONLY" + ] + }, "InspectionData":{ "type":"structure", "members":{ @@ -2020,6 +2135,14 @@ "documentation":"

The provided Amazon States Language definition is not valid.

", "exception":true }, + "InvalidEncryptionConfiguration":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Received when encryptionConfiguration is specified but various conditions exist which make the configuration invalid. For example, if type is set to CUSTOMER_MANAGED_KMS_KEY, but kmsKeyId is null, or kmsDataKeyReusePeriodSeconds is not between 60 and 900, or the KMS key is not symmetric, or the KMS key is inactive.

", + "exception":true + }, "InvalidExecutionInput":{ "type":"structure", "members":{ @@ -2033,7 +2156,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

Configuration is not valid.

", "exception":true }, "InvalidName":{ @@ -2068,6 +2191,55 @@ "documentation":"

Your tracingConfiguration key does not match, or enabled has not been set to true or false.

", "exception":true }, + "KmsAccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Either your KMS key policy or API caller does not have the required permissions.

", + "exception":true + }, + "KmsDataKeyReusePeriodSeconds":{ + "type":"integer", + "box":true, + "max":900, + "min":60 + }, + "KmsInvalidStateException":{ + "type":"structure", + "members":{ + "kmsKeyState":{ + "shape":"KmsKeyState", + "documentation":"

Current status of the KMS key. For example: DISABLED, PENDING_DELETION, PENDING_IMPORT, UNAVAILABLE, CREATING.

" + }, + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The KMS key is not in a valid state, for example: Disabled or Deleted.

", + "exception":true + }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, + "KmsKeyState":{ + "type":"string", + "enum":[ + "DISABLED", + "PENDING_DELETION", + "PENDING_IMPORT", + "UNAVAILABLE", + "CREATING" + ] + }, + "KmsThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Received when KMS returns ThrottlingException for a KMS call that Step Functions makes on behalf of the caller.

", + "exception":true + }, "LambdaFunctionFailedEventDetails":{ "type":"structure", "members":{ @@ -2923,6 +3095,10 @@ "traceHeader":{ "shape":"TraceHeader", "documentation":"

Passes the X-Ray trace header. The trace header can also be passed in the request payload.

" + }, + "includedData":{ + "shape":"IncludedData", + "documentation":"

If your state machine definition is encrypted with a KMS key, callers must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the API with includedData = METADATA_ONLY to get a successful response without the encrypted definition.

" } } }, @@ -3130,7 +3306,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

State machine type is not supported.

", "exception":true }, "StateMachineVersionList":{ @@ -3494,7 +3670,7 @@ }, "TaskToken":{ "type":"string", - "max":1024, + "max":2048, "min":1 }, "TestExecutionStatus":{ @@ -3720,6 +3896,10 @@ "versionDescription":{ "shape":"VersionDescription", "documentation":"

An optional description of the state machine version to publish.

You can only specify the versionDescription parameter if you've set publish to true.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Settings to configure server-side encryption.

" } } }, @@ -3851,5 +4031,5 @@ "includedDetails":{"type":"boolean"}, "truncated":{"type":"boolean"} }, - "documentation":"Step Functions

Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows.

You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues.

Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide .

If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn.

" + "documentation":"Step Functions

Step Functions coordinates the components of distributed applications and microservices using visual workflows.

You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues.

Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on Amazon Web Services, your own servers, or any system that has access to Amazon Web Services. You can access and use Step Functions using the console, the Amazon Web Services SDKs, or an HTTP API. For more information about Step Functions, see the Step Functions Developer Guide .

If you use the Step Functions API actions using Amazon Web Services SDK integrations, make sure the API actions are in camel case and parameter names are in Pascal case. For example, you could use Step Functions API action startSyncExecution and specify its parameter as StateMachineArn.

" } diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index f86ef02157..93da2d52d4 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"storagegateway", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Storage Gateway", "serviceId":"Storage Gateway", "signatureVersion":"v4", "targetPrefix":"StorageGateway_20130630", - "uid":"storagegateway-2013-06-30" + "uid":"storagegateway-2013-06-30", + "auth":["aws.auth#sigv4"] }, "operations":{ "ActivateGateway":{ @@ -544,7 +546,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.

" + "documentation":"

Returns your gateway's maintenance window schedule information, with values for monthly or weekly cadence, specific day and time to begin maintenance, and which types of updates to apply. Time values returned are for the gateway's time zone.

" }, "DescribeNFSFileShares":{ "name":"DescribeNFSFileShares", @@ -1146,7 +1148,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

" + "documentation":"

Updates a gateway's metadata, which includes the gateway's name, time zone, and metadata cache size. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

" }, "UpdateGatewaySoftwareNow":{ "name":"UpdateGatewaySoftwareNow", @@ -1174,7 +1176,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.

" + "documentation":"

Updates a gateway's maintenance window schedule, with settings for monthly or weekly cadence, specific day and time to begin maintenance, and which types of updates to apply. Time configuration uses the gateway's time zone. You can pass values for a complete maintenance schedule, or update policy, or both. Previous values will persist for whichever setting you choose not to modify. If an incomplete or invalid maintenance schedule is passed, the entire request will be rejected with an error and no changes will occur.

A complete maintenance schedule must include values for both MinuteOfHour and HourOfDay, and either DayOfMonth or DayOfWeek.

We recommend keeping maintenance updates turned on, except in specific use cases where the brief disruptions caused by updating the gateway could critically impact your deployment.

" }, "UpdateNFSFileShare":{ "name":"UpdateNFSFileShare", @@ -1244,7 +1246,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.

This API is called Security level in the User Guide.

A higher security level can affect performance of the gateway.

" + "documentation":"

Updates the SMB security strategy level for an Amazon S3 file gateway. This action is only supported for Amazon S3 file gateways.

For information about configuring this setting using the Amazon Web Services console, see Setting a security level for your gateway in the Amazon S3 File Gateway User Guide.

A higher security strategy level can affect performance of the gateway.

" }, "UpdateSnapshotSchedule":{ "name":"UpdateSnapshotSchedule", @@ -1295,7 +1297,7 @@ }, "GatewayTimezone":{ "shape":"GatewayTimezone", - "documentation":"

A value that indicates the time zone you want to set for the gateway. The time zone is of the format \"GMT-hr:mm\" or \"GMT+hr:mm\". For example, GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule.

" + "documentation":"

A value that indicates the time zone you want to set for the gateway. The time zone is of the format \"GMT\", \"GMT-hr:mm\", or \"GMT+hr:mm\". For example, GMT indicates Greenwich Mean Time without any offset. GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule.

" }, "GatewayRegion":{ "shape":"RegionId", @@ -1303,7 +1305,7 @@ }, "GatewayType":{ "shape":"GatewayType", - "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: STORED | CACHED | VTL | VTL_SNOW | FILE_S3 | FILE_FSX_SMB

" + "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB

" }, "TapeDriveType":{ "shape":"TapeDriveType", @@ -1623,6 +1625,13 @@ "max":10, "min":1 }, + "AutomaticUpdatePolicy":{ + "type":"string", + "enum":[ + "ALL_VERSIONS", + "EMERGENCY_VERSIONS_ONLY" + ] + }, "AvailabilityMonitorTestStatus":{ "type":"string", "enum":[ @@ -2921,7 +2930,7 @@ }, "HostEnvironment":{ "shape":"HostEnvironment", - "documentation":"

The type of hardware or software platform on which the gateway is running.

" + "documentation":"

The type of hardware or software platform on which the gateway is running.

Tape Gateway is no longer available on Snow Family devices.

" }, "EndpointType":{ "shape":"EndpointType", @@ -2980,14 +2989,18 @@ }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month. It is not possible to set the maintenance schedule to start on days 29 through 31.

" }, "Timezone":{ "shape":"GatewayTimezone", "documentation":"

A value that indicates the time zone that is set for the gateway. The start time and day of week specified should be in the time zone of the gateway.

" + }, + "SoftwareUpdatePreferences":{ + "shape":"SoftwareUpdatePreferences", + "documentation":"

A set of variables indicating the software update preferences for the gateway.

Includes AutomaticUpdatePolicy field with the following inputs:

ALL_VERSIONS - Enables regular gateway maintenance updates.

EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates.

" } }, - "documentation":"

A JSON object containing the following fields:

" + "documentation":"

A JSON object containing the following fields:

" }, "DescribeNFSFileSharesInput":{ "type":"structure", @@ -3056,7 +3069,7 @@ }, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

The type of security strategy that was specified for file gateway.

  • ClientSpecified: If you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Only supported for S3 File Gateways.

  • MandatorySigning: If you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

  • MandatoryEncryption: If you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

The type of security strategy that was specified for file gateway.

  • ClientSpecified: If you choose this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only for S3 File Gateway.

  • MandatorySigning: If you choose this option, File Gateway only allows connections from SMBv2 or SMBv3 clients that have signing turned on. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008, or later.

  • MandatoryEncryption: If you choose this option, File Gateway only allows connections from SMBv3 clients that have encryption turned on. Both 256-bit and 128-bit algorithms are allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later.

  • MandatoryEncryptionNoAes128: If you choose this option, File Gateway only allows connections from SMBv3 clients that use 256-bit AES encryption algorithms. 128-bit algorithms are not allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later.

" }, "FileSharesVisible":{ "shape":"Boolean", @@ -3803,7 +3816,7 @@ }, "HostEnvironment":{ "shape":"HostEnvironment", - "documentation":"

The type of hardware or software platform on which the gateway is running.

" + "documentation":"

The type of hardware or software platform on which the gateway is running.

Tape Gateway is no longer available on Snow Family devices.

" }, "HostEnvironmentId":{ "shape":"HostEnvironmentId", @@ -4598,7 +4611,7 @@ }, "FolderList":{ "shape":"FolderList", - "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed.

" + "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed.

Do not include / when specifying folder names. For example, you would specify samplefolder rather than samplefolder/.

" }, "Recursive":{ "shape":"Boolean", @@ -4860,7 +4873,8 @@ "enum":[ "ClientSpecified", "MandatorySigning", - "MandatoryEncryption" + "MandatoryEncryption", + "MandatoryEncryptionNoAes128" ] }, "ServiceUnavailableError":{ @@ -4947,6 +4961,16 @@ "type":"string", "pattern":"\\Asnap-([0-9A-Fa-f]{8}|[0-9A-Fa-f]{17})\\z" }, + "SoftwareUpdatePreferences":{ + "type":"structure", + "members":{ + "AutomaticUpdatePolicy":{ + "shape":"AutomaticUpdatePolicy", + "documentation":"

Indicates the automatic update policy for a gateway.

ALL_VERSIONS - Enables regular gateway maintenance updates.

EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates.

" + } + }, + "documentation":"

A set of variables indicating the software update preferences for the gateway.

" + }, "SoftwareUpdatesEndDate":{ "type":"string", "max":25, @@ -5510,7 +5534,7 @@ }, "GatewayCapacity":{ "shape":"GatewayCapacity", - "documentation":"

Specifies the size of the gateway's metadata cache.

" + "documentation":"

Specifies the size of the gateway's metadata cache. This setting impacts gateway performance and hardware recommendations. For more information, see Performance guidance for gateways with multiple file shares in the Amazon S3 File Gateway User Guide.

" } } }, @@ -5542,11 +5566,7 @@ }, "UpdateMaintenanceStartTimeInput":{ "type":"structure", - "required":[ - "GatewayARN", - "HourOfDay", - "MinuteOfHour" - ], + "required":["GatewayARN"], "members":{ "GatewayARN":{"shape":"GatewayARN"}, "HourOfDay":{ @@ -5559,14 +5579,18 @@ }, "DayOfWeek":{ "shape":"DayOfWeek", - "documentation":"

The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday.

" + "documentation":"

The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 represents Saturday.

" }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month. It is not possible to set the maintenance schedule to start on days 29 through 31.

" + }, + "SoftwareUpdatePreferences":{ + "shape":"SoftwareUpdatePreferences", + "documentation":"

A set of variables indicating the software update preferences for the gateway.

Includes AutomaticUpdatePolicy field with the following inputs:

ALL_VERSIONS - Enables regular gateway maintenance updates.

EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates.

" } }, - "documentation":"

A JSON object containing the following fields:

" + "documentation":"

A JSON object containing the following fields:

" }, "UpdateMaintenanceStartTimeOutput":{ "type":"structure", @@ -5795,7 +5819,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only in S3 File Gateway.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

Specifies the type of security strategy.

ClientSpecified: If you choose this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only for S3 File Gateway.

MandatorySigning: If you choose this option, File Gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: If you choose this option, File Gateway only allows connections from SMBv3 clients that have encryption enabled. This option is recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

MandatoryEncryptionNoAes128: If you choose this option, File Gateway only allows connections from SMBv3 clients that use 256-bit AES encryption algorithms. 128-bit algorithms are not allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later.

" } } }, diff --git a/botocore/data/support/2013-04-15/service-2.json b/botocore/data/support/2013-04-15/service-2.json index bde6d62f88..abfad31c8e 100644 --- a/botocore/data/support/2013-04-15/service-2.json +++ b/botocore/data/support/2013-04-15/service-2.json @@ -10,7 +10,8 @@ "serviceId":"Support", "signatureVersion":"v4", "targetPrefix":"AWSSupport_20130415", - "uid":"support-2013-04-15" + "uid":"support-2013-04-15", + "auth":["aws.auth#sigv4"] }, "operations":{ "AddAttachmentsToSet":{ @@ -317,7 +318,7 @@ "documentation":"

The content of the attachment file.

" } }, - "documentation":"

An attachment to a case communication. The attachment consists of the file name and the content of the file.

" + "documentation":"

An attachment to a case communication. The attachment consists of the file name and the content of the file. Each attachment file size should not exceed 5 MB. File types that are supported include the following: .pdf, .jpeg, .doc, .log, .text

" }, "AttachmentDetails":{ "type":"structure", @@ -429,7 +430,7 @@ }, "status":{ "shape":"Status", - "documentation":"

The status of the case.

Valid values:

  • opened

  • pending-customer-action

  • reopened

  • resolved

  • unassigned

  • work-in-progress

" + "documentation":"

The status of the case.

Valid values:

  • all-open

  • customer-action-completed

  • opened

  • pending-customer-action

  • reopened

  • resolved

  • unassigned

  • work-in-progress

" }, "serviceCode":{ "shape":"ServiceCode", @@ -464,7 +465,7 @@ "documentation":"

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

" } }, - "documentation":"

A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

  • caseId - The support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.

  • categoryCode - The category of problem for the support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.

  • displayId - The identifier for the case on pages in the Amazon Web Services Support Center.

  • language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

  • nextToken - A resumption point for pagination.

  • recentCommunications - One or more Communication objects. Fields of these objects are attachments, body, caseId, submittedBy, and timeCreated.

  • serviceCode - The identifier for the Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.

  • severityCode - The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are: low, normal, high, urgent, and critical.

  • status - The status of the case in the Amazon Web Services Support Center. Valid values:

    • opened

    • pending-customer-action

    • reopened

    • resolved

    • unassigned

    • work-in-progress

  • subject - The subject line of the case.

  • submittedBy - The email address of the account that submitted the case.

  • timeCreated - The time the case was created, in ISO-8601 format.

" + "documentation":"

A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

  • caseId - The support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.

  • categoryCode - The category of problem for the support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.

  • displayId - The identifier for the case on pages in the Amazon Web Services Support Center.

  • language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (“zh”), English (\"en\"), Japanese (\"ja\") and Korean (“ko”). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

  • nextToken - A resumption point for pagination.

  • recentCommunications - One or more Communication objects. Fields of these objects are attachments, body, caseId, submittedBy, and timeCreated.

  • serviceCode - The identifier for the Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.

  • severityCode - The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are: low, normal, high, urgent, and critical.

  • status - The status of the case in the Amazon Web Services Support Center. Valid values:

    • all-open

    • customer-action-completed

    • opened

    • pending-customer-action

    • reopened

    • resolved

    • unassigned

    • work-in-progress

  • subject - The subject line of the case.

  • submittedBy - The email address of the account that submitted the case.

  • timeCreated - The time the case was created, in ISO-8601 format.

" }, "CaseId":{"type":"string"}, "CaseIdList":{ @@ -1439,5 +1440,5 @@ "min":0 } }, - "documentation":"Amazon Web Services Support

The Amazon Web Services Support API Reference is intended for programmers who need detailed information about the Amazon Web Services Support operations and data types. You can use the API to manage your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return results in JSON format.

  • You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API.

  • If you call the Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support.

You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

You can manage your support cases with the following Amazon Web Services Support API operations:

You can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more information, see Trusted Advisor in the Amazon Web Services Support User Guide.

For authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing Process.

For more information about this service and the endpoints to use, see About the Amazon Web Services Support API in the Amazon Web Services Support User Guide.

" + "documentation":"Amazon Web Services Support

The Amazon Web Services Support API Reference is intended for programmers who need detailed information about the Amazon Web Services Support operations and data types. You can use the API to manage your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return results in JSON format.

  • You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API.

  • If you call the Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support.

You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

You can manage your support cases with the following Amazon Web Services Support API operations:

You can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more information, see Trusted Advisor in the Amazon Web Services Support User Guide.

For authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing Process.

For more information about this service and the endpoints to use, see About the Amazon Web Services Support API in the Amazon Web Services Support User Guide.

" } diff --git a/botocore/data/swf/2012-01-25/endpoint-rule-set-1.json b/botocore/data/swf/2012-01-25/endpoint-rule-set-1.json index c25159f6f5..407e24f303 100644 --- a/botocore/data/swf/2012-01-25/endpoint-rule-set-1.json +++ b/botocore/data/swf/2012-01-25/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], 
+ "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/swf/2012-01-25/service-2.json b/botocore/data/swf/2012-01-25/service-2.json index afbf005dc6..e72971db71 100644 --- a/botocore/data/swf/2012-01-25/service-2.json +++ b/botocore/data/swf/2012-01-25/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"swf", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"Amazon SWF", "serviceFullName":"Amazon Simple Workflow Service", "serviceId":"SWF", "signatureVersion":"v4", "targetPrefix":"SimpleWorkflowService", - "uid":"swf-2012-01-25" + "uid":"swf-2012-01-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "CountClosedWorkflowExecutions":{ @@ -69,6 +71,34 @@ ], "documentation":"

Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no decision task was ever scheduled in then 0 is returned.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

" }, + "DeleteActivityType":{ + "name":"DeleteActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteActivityTypeInput"}, + "errors":[ + {"shape":"UnknownResourceFault"}, + {"shape":"TypeNotDeprecatedFault"}, + {"shape":"OperationNotPermittedFault"} + ], + "documentation":"

Deletes the specified activity type.

Note: Prior to deletion, activity types must first be deprecated.

After an activity type has been deleted, you cannot schedule new activities of that type. Activities that started before the type was deleted will continue to run.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

" + }, + "DeleteWorkflowType":{ + "name":"DeleteWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWorkflowTypeInput"}, + "errors":[ + {"shape":"UnknownResourceFault"}, + {"shape":"TypeNotDeprecatedFault"}, + {"shape":"OperationNotPermittedFault"} + ], + "documentation":"

Deletes the specified workflow type.

Note: Prior to deletion, workflow types must first be deprecated.

After a workflow type has been deleted, you cannot create new executions of that type. Executions that started before the type was deleted will continue to run.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • workflowType.name: String constraint. The key is swf:workflowType.name.

    • workflowType.version: String constraint. The key is swf:workflowType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

" + }, "DeprecateActivityType":{ "name":"DeprecateActivityType", "http":{ @@ -81,7 +111,7 @@ {"shape":"TypeDeprecatedFault"}, {"shape":"OperationNotPermittedFault"} ], - "documentation":"

Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.

This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

" + "documentation":"

Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.

Access Control

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

  • Use a Resource element with the domain name to limit the action to only specified domains.

  • Use an Action element to allow or deny permission to call this action.

  • Constrain the following parameters by using a Condition element with the appropriate keys.

    • activityType.name: String constraint. The key is swf:activityType.name.

    • activityType.version: String constraint. The key is swf:activityType.version.

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

" }, "DeprecateDomain":{ "name":"DeprecateDomain", @@ -1647,6 +1677,40 @@ "documentation":"

The StartWorkflowExecution API action was called without the required parameters set.

Some workflow execution parameters, such as the decision taskList, must be set to start the execution. However, these parameters might have been set as defaults when the workflow type was registered. In this case, you can omit these parameters from the StartWorkflowExecution call and Amazon SWF uses the values defined in the workflow type.

If these parameters aren't set and no default parameters were defined in the workflow type, this error is displayed.

", "exception":true }, + "DeleteActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "activityType" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain in which the activity type is registered.

" + }, + "activityType":{ + "shape":"ActivityType", + "documentation":"

The activity type to delete.

" + } + } + }, + "DeleteWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "workflowType" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain in which the workflow type is registered.

" + }, + "workflowType":{ + "shape":"WorkflowType", + "documentation":"

The workflow type to delete.

" + } + } + }, "DeprecateActivityTypeInput":{ "type":"structure", "required":[ @@ -4054,6 +4118,14 @@ "documentation":"

Returned when the specified activity or workflow type was already deprecated.

", "exception":true }, + "TypeNotDeprecatedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned when the resource type has not been deprecated.

", + "exception":true + }, "UndeprecateActivityTypeInput":{ "type":"structure", "required":[ diff --git a/botocore/data/taxsettings/2018-05-10/endpoint-rule-set-1.json b/botocore/data/taxsettings/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..dbd7cc6df0 --- /dev/null +++ b/botocore/data/taxsettings/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,372 @@ +{ + "version": "1.0", + "parameters": { + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": 
"Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://tax-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://tax-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://tax.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://tax.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/taxsettings/2018-05-10/paginators-1.json b/botocore/data/taxsettings/2018-05-10/paginators-1.json 
new file mode 100644 index 0000000000..3944b04ceb --- /dev/null +++ b/botocore/data/taxsettings/2018-05-10/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListTaxRegistrations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "accountDetails" + } + } +} diff --git a/botocore/data/taxsettings/2018-05-10/service-2.json b/botocore/data/taxsettings/2018-05-10/service-2.json new file mode 100644 index 0000000000..31e0ca5a69 --- /dev/null +++ b/botocore/data/taxsettings/2018-05-10/service-2.json @@ -0,0 +1,1530 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"tax", + "jsonVersion":"1.1", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"Tax Settings", + "serviceId":"TaxSettings", + "signatureVersion":"v4", + "signingName":"tax", + "uid":"taxsettings-2018-05-10", + "auth":["aws.auth#sigv4"] + }, + "operations":{ + "BatchDeleteTaxRegistration":{ + "name":"BatchDeleteTaxRegistration", + "http":{ + "method":"POST", + "requestUri":"/BatchDeleteTaxRegistration", + "responseCode":200 + }, + "input":{"shape":"BatchDeleteTaxRegistrationRequest"}, + "output":{"shape":"BatchDeleteTaxRegistrationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes tax registration for multiple accounts in batch. This can be used to delete tax registrations for up to five accounts in one batch.

This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead.

" + }, + "BatchPutTaxRegistration":{ + "name":"BatchPutTaxRegistration", + "http":{ + "method":"POST", + "requestUri":"/BatchPutTaxRegistration", + "responseCode":200 + }, + "input":{"shape":"BatchPutTaxRegistrationRequest"}, + "output":{"shape":"BatchPutTaxRegistrationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.

To call this API operation for specific countries, see the following country-specific requirements.

Bangladesh

  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Brazil

  • You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.

  • For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.

Georgia

  • The valid personType values are Physical Person and Business.

Kenya

  • You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object.

  • If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Malaysia

  • If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information.

  • By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number.

  • Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate.

  • If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia.

  • Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services.

  • Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller.

    Taxable service and service tax codes:

    Consultancy - 9907061674

    Training or coaching service - 9907071685

    IT service - 9907101676

    Digital services and electronic medium - 9907121690

Nepal

  • The sector valid values are Business and Individual.

Saudi Arabia

  • For address, you must specify addressLine3.

South Korea

  • You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName.

  • You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields.

  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

  • For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion.

Spain

  • You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object.

  • If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Turkey

  • You must specify the sector in the taxRegistrationEntry object.

  • If your sector is Business, Individual, or Government:

    • Specify the taxOffice. If your sector is Individual, don't enter this value.

    • (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value.

    • Note: In the Tax Settings page of the Billing console, Government appears as Public institutions

  • If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field.

  • For address, you must specify districtOrCounty.

Ukraine

  • The sector valid values are Business and Individual.

" + }, + "DeleteTaxRegistration":{ + "name":"DeleteTaxRegistration", + "http":{ + "method":"POST", + "requestUri":"/DeleteTaxRegistration", + "responseCode":200 + }, + "input":{"shape":"DeleteTaxRegistrationRequest"}, + "output":{"shape":"DeleteTaxRegistrationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes tax registration for a single account.

This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead.

" + }, + "GetTaxRegistration":{ + "name":"GetTaxRegistration", + "http":{ + "method":"POST", + "requestUri":"/GetTaxRegistration", + "responseCode":200 + }, + "input":{"shape":"GetTaxRegistrationRequest"}, + "output":{"shape":"GetTaxRegistrationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves tax registration for a single account.

" + }, + "GetTaxRegistrationDocument":{ + "name":"GetTaxRegistrationDocument", + "http":{ + "method":"POST", + "requestUri":"/GetTaxRegistrationDocument", + "responseCode":200 + }, + "input":{"shape":"GetTaxRegistrationDocumentRequest"}, + "output":{"shape":"GetTaxRegistrationDocumentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Downloads your tax documents to the Amazon S3 bucket that you specify in your request.

" + }, + "ListTaxRegistrations":{ + "name":"ListTaxRegistrations", + "http":{ + "method":"POST", + "requestUri":"/ListTaxRegistrations", + "responseCode":200 + }, + "input":{"shape":"ListTaxRegistrationsRequest"}, + "output":{"shape":"ListTaxRegistrationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves the tax registration of accounts listed in a consolidated billing family. This can be used to retrieve up to 100 accounts' tax registrations in one call (default 50).

" + }, + "PutTaxRegistration":{ + "name":"PutTaxRegistration", + "http":{ + "method":"POST", + "requestUri":"/PutTaxRegistration", + "responseCode":200 + }, + "input":{"shape":"PutTaxRegistrationRequest"}, + "output":{"shape":"PutTaxRegistrationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.

To call this API operation for specific countries, see the following country-specific requirements.

Bangladesh

  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Brazil

  • You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.

  • For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.

Georgia

  • The valid personType values are Physical Person and Business.

Kenya

  • You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object.

  • If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Malaysia

  • If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information.

  • By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number.

  • Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate.

  • If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia.

  • Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services.

  • Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller.

    Taxable service and service tax codes:

    Consultancy - 9907061674

    Training or coaching service - 9907071685

    IT service - 9907101676

    Digital services and electronic medium - 9907121690

Nepal

  • The sector valid values are Business and Individual.

Saudi Arabia

  • For address, you must specify addressLine3.

South Korea

  • You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName.

  • You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields.

  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

  • For the address object, use Korean characters for addressLine1, addressLine2, city, postalCode, and stateOrRegion.

Spain

  • You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object.

  • If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Turkey

  • You must specify the sector in the taxRegistrationEntry object.

  • If your sector is Business, Individual, or Government:

    • Specify the taxOffice. If your sector is Individual, don't enter this value.

    • (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value.

    • Note: In the Tax Settings page of the Billing console, Government appears as Public institutions

  • If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field.

  • For address, you must specify districtOrCounty.

Ukraine

  • The sector valid values are Business and Individual.

" + } + }, + "shapes":{ + "AccountDetails":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

List of unique account identifiers.

" + }, + "accountMetaData":{ + "shape":"AccountMetaData", + "documentation":"

The meta data information associated with the account.

" + }, + "taxInheritanceDetails":{ + "shape":"TaxInheritanceDetails", + "documentation":"

Tax inheritance information associated with the account.

" + }, + "taxRegistration":{ + "shape":"TaxRegistrationWithJurisdiction", + "documentation":"

Your TRN information. Instead of having full legal address, here TRN information will have jurisdiction details (for example, country code and state/region/province if applicable).

" + } + }, + "documentation":"

An object with your accountId and TRN information.

", + "sensitive":true + }, + "AccountDetailsList":{ + "type":"list", + "member":{"shape":"AccountDetails"} + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d+$" + }, + "AccountIds":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":5, + "min":1 + }, + "AccountMetaData":{ + "type":"structure", + "members":{ + "accountName":{ + "shape":"AccountName", + "documentation":"

The Amazon Web Services accounts name.

" + }, + "address":{"shape":"Address"}, + "addressRoleMap":{ + "shape":"AddressRoleMap", + "documentation":"

Address roles associated with the account containing country code information.

" + }, + "addressType":{ + "shape":"AddressRoleType", + "documentation":"

The type of address associated with the legal profile.

" + }, + "seller":{ + "shape":"Seller", + "documentation":"

Seller information associated with the account.

" + } + }, + "documentation":"

The meta data information associated with the account.

", + "sensitive":true + }, + "AccountName":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "AdditionalInfoRequest":{ + "type":"structure", + "members":{ + "canadaAdditionalInfo":{ + "shape":"CanadaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Canada.

" + }, + "estoniaAdditionalInfo":{ + "shape":"EstoniaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Estonia.

" + }, + "georgiaAdditionalInfo":{ + "shape":"GeorgiaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Georgia.

" + }, + "israelAdditionalInfo":{ + "shape":"IsraelAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Israel.

" + }, + "italyAdditionalInfo":{ + "shape":"ItalyAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Italy.

" + }, + "kenyaAdditionalInfo":{ + "shape":"KenyaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Kenya.

" + }, + "malaysiaAdditionalInfo":{ + "shape":"MalaysiaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Malaysia.

" + }, + "polandAdditionalInfo":{ + "shape":"PolandAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Poland.

" + }, + "romaniaAdditionalInfo":{ + "shape":"RomaniaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Romania.

" + }, + "saudiArabiaAdditionalInfo":{ + "shape":"SaudiArabiaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Saudi Arabia.

" + }, + "southKoreaAdditionalInfo":{ + "shape":"SouthKoreaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in South Korea.

" + }, + "spainAdditionalInfo":{ + "shape":"SpainAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Spain.

" + }, + "turkeyAdditionalInfo":{ + "shape":"TurkeyAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Turkey.

" + }, + "ukraineAdditionalInfo":{ + "shape":"UkraineAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Ukraine.

" + } + }, + "documentation":"

Additional tax information associated with your tax registration number (TRN). Depending on the TRN for a specific country, you might need to specify this information when you set your TRN.

You can only specify one of the following parameters and the value can't be empty.

The parameter that you specify must match the country for the TRN, if available. For example, if you set a TRN in Canada for specific provinces, you must also specify the canadaAdditionalInfo parameter.

" + }, + "AdditionalInfoResponse":{ + "type":"structure", + "members":{ + "brazilAdditionalInfo":{ + "shape":"BrazilAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Brazil. The Tax Settings API returns this information in your response when any additional information is present with your TRN in Brazil.

" + }, + "canadaAdditionalInfo":{ + "shape":"CanadaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Canada.

" + }, + "estoniaAdditionalInfo":{ + "shape":"EstoniaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Estonia.

" + }, + "georgiaAdditionalInfo":{ + "shape":"GeorgiaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Georgia.

" + }, + "indiaAdditionalInfo":{ + "shape":"IndiaAdditionalInfo", + "documentation":"

Additional tax information in India.

" + }, + "israelAdditionalInfo":{ + "shape":"IsraelAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Israel.

" + }, + "italyAdditionalInfo":{ + "shape":"ItalyAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Italy.

" + }, + "kenyaAdditionalInfo":{ + "shape":"KenyaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Kenya.

" + }, + "malaysiaAdditionalInfo":{ + "shape":"MalaysiaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Malaysia.

" + }, + "polandAdditionalInfo":{ + "shape":"PolandAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Poland.

" + }, + "romaniaAdditionalInfo":{ + "shape":"RomaniaAdditionalInfo", + "documentation":"

Additional tax information to specify for a TRN in Romania.

" + }, + "saudiArabiaAdditionalInfo":{ + "shape":"SaudiArabiaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Saudi Arabia.

" + }, + "southKoreaAdditionalInfo":{ + "shape":"SouthKoreaAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in South Korea.

" + }, + "spainAdditionalInfo":{ + "shape":"SpainAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Spain.

" + }, + "turkeyAdditionalInfo":{ + "shape":"TurkeyAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Turkey.

" + }, + "ukraineAdditionalInfo":{ + "shape":"UkraineAdditionalInfo", + "documentation":"

Additional tax information associated with your TRN in Ukraine.

" + } + }, + "documentation":"

Additional tax information associated with your TRN. The Tax Settings API returns country-specific information in the response when any additional information is present with your TRN for the following countries.

" + }, + "Address":{ + "type":"structure", + "required":[ + "addressLine1", + "city", + "countryCode", + "postalCode" + ], + "members":{ + "addressLine1":{ + "shape":"AddressLine1", + "documentation":"

The first line of the address.

" + }, + "addressLine2":{ + "shape":"AddressLine2", + "documentation":"

The second line of the address, if applicable.

" + }, + "addressLine3":{ + "shape":"AddressLine3", + "documentation":"

The third line of the address, if applicable. Currently, the Tax Settings API accepts the addressLine3 parameter only for Saudi Arabia. When you specify a TRN in Saudi Arabia, you must enter the addressLine3 and specify the building number for the address. For example, you might enter 1234.

" + }, + "city":{ + "shape":"City", + "documentation":"

The city that the address is in.

" + }, + "countryCode":{ + "shape":"CountryCode", + "documentation":"

The country code for the country that the address is in.

" + }, + "districtOrCounty":{ + "shape":"District", + "documentation":"

The district or county the address is located.

For addresses in Brazil, this parameter uses the name of the neighborhood. When you set a TRN in Brazil, use districtOrCounty for the neighborhood name.

" + }, + "postalCode":{ + "shape":"PostalCode", + "documentation":"

The postal code associated with the address.

" + }, + "stateOrRegion":{ + "shape":"State", + "documentation":"

The state, region, or province that the address is located.

If this is required for tax settings, use the same name as shown on the Tax Settings page.

" + } + }, + "documentation":"

The details of the address associated with the TRN information.

" + }, + "AddressLine1":{ + "type":"string", + "max":180, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "AddressLine2":{ + "type":"string", + "max":60, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "AddressLine3":{ + "type":"string", + "max":60, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "AddressRoleMap":{ + "type":"map", + "key":{"shape":"AddressRoleType"}, + "value":{"shape":"Jurisdiction"} + }, + "AddressRoleType":{ + "type":"string", + "enum":[ + "TaxAddress", + "BillingAddress", + "ContactAddress" + ] + }, + "BatchDeleteTaxRegistrationError":{ + "type":"structure", + "required":[ + "accountId", + "message" + ], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The unique account identifier for the account whose tax registration couldn't be deleted during the BatchDeleteTaxRegistration operation.

" + }, + "code":{ + "shape":"ErrorCode", + "documentation":"

The error code for an individual failure in BatchDeleteTaxRegistration operation.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message for an individual failure in the BatchDeleteTaxRegistration operation.

" + } + }, + "documentation":"

The error object for representing failures in the BatchDeleteTaxRegistration operation.

" + }, + "BatchDeleteTaxRegistrationErrors":{ + "type":"list", + "member":{"shape":"BatchDeleteTaxRegistrationError"} + }, + "BatchDeleteTaxRegistrationRequest":{ + "type":"structure", + "required":["accountIds"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

List of unique account identifiers.

" + } + } + }, + "BatchDeleteTaxRegistrationResponse":{ + "type":"structure", + "required":["errors"], + "members":{ + "errors":{ + "shape":"BatchDeleteTaxRegistrationErrors", + "documentation":"

The list of errors for the accounts the TRN information could not be deleted for.

" + } + } + }, + "BatchPutTaxRegistrationError":{ + "type":"structure", + "required":[ + "accountId", + "message" + ], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The unique account identifier for the account that the tax registration couldn't be added, or updated during the BatchPutTaxRegistration operation.

" + }, + "code":{ + "shape":"ErrorCode", + "documentation":"

The error code for an individual failure in the BatchPutTaxRegistration operation.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message for an individual failure in the BatchPutTaxRegistration operation.

" + } + }, + "documentation":"

The error object for representing failures in the BatchPutTaxRegistration operation.

" + }, + "BatchPutTaxRegistrationErrors":{ + "type":"list", + "member":{"shape":"BatchPutTaxRegistrationError"} + }, + "BatchPutTaxRegistrationRequest":{ + "type":"structure", + "required":[ + "accountIds", + "taxRegistrationEntry" + ], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

List of unique account identifiers.

" + }, + "taxRegistrationEntry":{ + "shape":"TaxRegistrationEntry", + "documentation":"

Your TRN information that will be stored to the accounts mentioned in putEntries.

" + } + } + }, + "BatchPutTaxRegistrationResponse":{ + "type":"structure", + "required":["errors"], + "members":{ + "errors":{ + "shape":"BatchPutTaxRegistrationErrors", + "documentation":"

List of errors for the accounts the TRN information could not be added or updated to.

" + }, + "status":{ + "shape":"TaxRegistrationStatus", + "documentation":"

The status of your TRN stored in the system after processing. Based on the validation occurring on the TRN, the status can be Verified, Pending or Rejected.

" + } + } + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BrazilAdditionalInfo":{ + "type":"structure", + "members":{ + "ccmCode":{ + "shape":"CcmCode", + "documentation":"

The Cadastro de Contribuintes Mobiliários (CCM) code for your TRN in Brazil. This only applies for a CNPJ tax type for the São Paulo municipality.

" + }, + "legalNatureCode":{ + "shape":"LegalNatureCode", + "documentation":"

Legal nature of business, based on your TRN in Brazil. This only applies for a CNPJ tax type.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Brazil.

" + }, + "BusinessRepresentativeName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + }, + "CanadaAdditionalInfo":{ + "type":"structure", + "members":{ + "canadaQuebecSalesTaxNumber":{ + "shape":"CanadaQuebecSalesTaxNumberString", + "documentation":"

The Quebec Sales Tax ID number. Leave blank if you do not have a Quebec Sales Tax ID number.

" + }, + "canadaRetailSalesTaxNumber":{ + "shape":"CanadaRetailSalesTaxNumberString", + "documentation":"

Manitoba Retail Sales Tax ID number. Customers purchasing Amazon Web Services for resale in Manitoba must provide a valid Retail Sales Tax ID number for Manitoba. Leave this blank if you do not have a Retail Sales Tax ID number in Manitoba or are not purchasing Amazon Web Services for resale.

" + }, + "isResellerAccount":{ + "shape":"Boolean", + "documentation":"

The value for this parameter must be true if the provincialSalesTaxId value is provided for a TRN in British Columbia, Saskatchewan, or Manitoba provinces.

To claim a provincial sales tax (PST) and retail sales tax (RST) reseller exemption, you must confirm that purchases from this account were made for resale. Otherwise, remove the PST or RST number from the provincialSalesTaxId parameter from your request.

" + }, + "provincialSalesTaxId":{ + "shape":"CanadaProvincialSalesTaxIdString", + "documentation":"

The provincial sales tax ID for your TRN in Canada. This parameter can represent the following:

  • Provincial sales tax ID number for British Columbia and Saskatchewan provinces

  • Manitoba retail sales tax ID number for Manitoba province

  • Quebec sales tax ID number for Quebec province

The Tax Setting API only accepts this parameter if the TRN is specified for the previous provinces. For other provinces, the Tax Settings API doesn't accept this parameter.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Canada.

" + }, + "CanadaProvincialSalesTaxIdString":{ + "type":"string", + "max":16, + "min":7, + "pattern":"^([0-9A-Z/-]+)$" + }, + "CanadaQuebecSalesTaxNumberString":{ + "type":"string", + "pattern":"^([0-9]{10})(TQ[0-9]{4})?$" + }, + "CanadaRetailSalesTaxNumberString":{ + "type":"string", + "pattern":"^([0-9]{6}-[0-9]{1})$" + }, + "CcmCode":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^\\d+$" + }, + "CertifiedEmailId":{ + "type":"string", + "pattern":"^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,20}$" + }, + "CigNumber":{ + "type":"string", + "pattern":"^([0-9A-Z]{1,15})$" + }, + "City":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

409

" + }, + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The exception when the input is creating conflict with the given state.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CountryCode":{ + "type":"string", + "max":2, + "min":2, + "pattern":"^[a-zA-Z]+$" + }, + "CupNumber":{ + "type":"string", + "pattern":"^([0-9A-Z]{1,15})$" + }, + "DateOfBirth":{ + "type":"string", + "max":10, + "min":10, + "pattern":"^(\\d{4}-(0[0-9]|1[0-2])-([0-2][0-9]|3[0-1]))$" + }, + "DeleteTaxRegistrationRequest":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

Unique account identifier for the TRN information that needs to be deleted. If this isn't passed, the account ID corresponding to the credentials of the API caller will be used for this parameter.

" + } + } + }, + "DeleteTaxRegistrationResponse":{ + "type":"structure", + "members":{ + } + }, + "DestinationFilePath":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "DestinationS3Location":{ + "type":"structure", + "required":["bucket"], + "members":{ + "bucket":{ + "shape":"S3BucketName", + "documentation":"

The name of your Amazon S3 bucket that you specify to download your tax documents to.

" + }, + "prefix":{ + "shape":"S3Prefix", + "documentation":"

The Amazon S3 object prefix that you specify for your tax document file.

" + } + }, + "documentation":"

The location of the Amazon S3 bucket that you specify to download your tax documents to.

" + }, + "District":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "ErrorCode":{ + "type":"string", + "max":50, + "min":0, + "pattern":"^[\\s\\S]*$" + }, + "ErrorMessage":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^[\\s\\S]*$", + "sensitive":true + }, + "EstoniaAdditionalInfo":{ + "type":"structure", + "required":["registryCommercialCode"], + "members":{ + "registryCommercialCode":{ + "shape":"RegistryCommercialCode", + "documentation":"

Registry commercial code (RCC) for your TRN in Estonia. This value is an eight-numeric string, such as 12345678.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Estonia.

" + }, + "FieldName":{ + "type":"string", + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "GeorgiaAdditionalInfo":{ + "type":"structure", + "required":["personType"], + "members":{ + "personType":{ + "shape":"PersonType", + "documentation":"

The legal person or physical person assigned to this TRN in Georgia.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Georgia.

" + }, + "GetTaxRegistrationDocumentRequest":{ + "type":"structure", + "required":[ + "destinationS3Location", + "taxDocumentMetadata" + ], + "members":{ + "destinationS3Location":{ + "shape":"DestinationS3Location", + "documentation":"

The Amazon S3 bucket that you specify to download your tax documents to.

" + }, + "taxDocumentMetadata":{ + "shape":"TaxDocumentMetadata", + "documentation":"

The metadata for your tax document.

" + } + } + }, + "GetTaxRegistrationDocumentResponse":{ + "type":"structure", + "members":{ + "destinationFilePath":{ + "shape":"DestinationFilePath", + "documentation":"

The file path of the Amazon S3 bucket where you want to download your tax document to.

" + } + } + }, + "GetTaxRegistrationRequest":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

Your unique account identifier.

" + } + } + }, + "GetTaxRegistrationResponse":{ + "type":"structure", + "members":{ + "taxRegistration":{ + "shape":"TaxRegistration", + "documentation":"

TRN information of the account mentioned in the request.

" + } + } + }, + "IndiaAdditionalInfo":{ + "type":"structure", + "members":{ + "pan":{ + "shape":"Pan", + "documentation":"

India pan information associated with the account.

" + } + }, + "documentation":"

Additional tax information in India.

" + }, + "IndividualRegistrationNumber":{ + "type":"string", + "pattern":"^([0-9]{10})$" + }, + "Industries":{ + "type":"string", + "enum":[ + "CirculatingOrg", + "ProfessionalOrg", + "Banks", + "Insurance", + "PensionAndBenefitFunds", + "DevelopmentAgencies" + ] + }, + "InheritanceObtainedReason":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "InternalServerException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

500

" + }, + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The exception thrown when an unexpected error occurs when processing a request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IsraelAdditionalInfo":{ + "type":"structure", + "required":[ + "customerType", + "dealerType" + ], + "members":{ + "customerType":{ + "shape":"IsraelCustomerType", + "documentation":"

Customer type for your TRN in Israel. The value can be Business or Individual. Use Business for entities such as not-for-profit and financial institutions.

" + }, + "dealerType":{ + "shape":"IsraelDealerType", + "documentation":"

Dealer type for your TRN in Israel. If you're not a local authorized dealer with an Israeli VAT ID, specify your tax identification number so that Amazon Web Services can send you a compliant tax invoice.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Israel.

" + }, + "IsraelCustomerType":{ + "type":"string", + "enum":[ + "Business", + "Individual" + ] + }, + "IsraelDealerType":{ + "type":"string", + "enum":[ + "Authorized", + "Non-authorized" + ] + }, + "ItalyAdditionalInfo":{ + "type":"structure", + "members":{ + "cigNumber":{ + "shape":"CigNumber", + "documentation":"

The tender procedure identification code.

" + }, + "cupNumber":{ + "shape":"CupNumber", + "documentation":"

Additional tax information to specify for a TRN in Italy. This is managed by the Interministerial Committee for Economic Planning (CIPE) which characterizes every public investment project (Individual Project Code).

" + }, + "sdiAccountId":{ + "shape":"SdiAccountId", + "documentation":"

Additional tax information to specify for a TRN in Italy. Use CodiceDestinatario to receive your invoices via web service (API) or FTP.

" + }, + "taxCode":{ + "shape":"TaxCode", + "documentation":"

List of service tax codes for your TRN in Italy. You can use your customer tax code as part of a VAT Group.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Italy.

" + }, + "ItemOfBusiness":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + }, + "Jurisdiction":{ + "type":"structure", + "required":["countryCode"], + "members":{ + "countryCode":{ + "shape":"CountryCode", + "documentation":"

The country code of the jurisdiction.

" + }, + "stateOrRegion":{ + "shape":"State", + "documentation":"

The state, region, or province associated with the country of the jurisdiction, if applicable.

" + } + }, + "documentation":"

The jurisdiction details of the TRN information of the customers. This doesn't contain full legal address, and contains only country code and state/region/province.

" + }, + "KenyaAdditionalInfo":{ + "type":"structure", + "required":["personType"], + "members":{ + "personType":{ + "shape":"PersonType", + "documentation":"

The legal person or physical person assigned to this TRN in Kenya.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Kenya.

" + }, + "KepEmailId":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "LegalName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "LegalNatureCode":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^\\d+$" + }, + "LineOfBusiness":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + }, + "ListTaxRegistrationsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Number of accountDetails results you want in one response.

" + }, + "nextToken":{ + "shape":"PaginationTokenString", + "documentation":"

The token to retrieve the next set of results.

" + } + } + }, + "ListTaxRegistrationsResponse":{ + "type":"structure", + "required":["accountDetails"], + "members":{ + "accountDetails":{ + "shape":"AccountDetailsList", + "documentation":"

The list of account details. This contains account Ids and TRN Information for each of the linked accounts.

" + }, + "nextToken":{ + "shape":"PaginationTokenString", + "documentation":"

The token to retrieve the next set of results.

" + } + } + }, + "MalaysiaAdditionalInfo":{ + "type":"structure", + "required":["serviceTaxCodes"], + "members":{ + "serviceTaxCodes":{ + "shape":"MalaysiaServiceTaxCodesList", + "documentation":"

List of service tax codes for your TRN in Malaysia.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Malaysia.

" + }, + "MalaysiaServiceTaxCode":{ + "type":"string", + "enum":[ + "Consultancy", + "Digital Service And Electronic Medium", + "IT Services", + "Training Or Coaching" + ] + }, + "MalaysiaServiceTaxCodesList":{ + "type":"list", + "member":{"shape":"MalaysiaServiceTaxCode"}, + "max":4, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "PaginationTokenString":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"^[-A-Za-z0-9_+\\=\\/]+$" + }, + "Pan":{ + "type":"string", + "pattern":"^[A-Z]{5}[0-9]{4}[A-Z]{1}$" + }, + "PersonType":{ + "type":"string", + "enum":[ + "Legal Person", + "Physical Person", + "Business" + ] + }, + "PolandAdditionalInfo":{ + "type":"structure", + "members":{ + "individualRegistrationNumber":{ + "shape":"IndividualRegistrationNumber", + "documentation":"

The individual tax registration number (NIP). Individual NIP is valid for other taxes excluding VAT purposes.

" + }, + "isGroupVatEnabled":{ + "shape":"Boolean", + "documentation":"

True if your business is a member of a VAT group with a NIP active for VAT purposes. Otherwise, this is false.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Poland.

" + }, + "PostalCode":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "PutTaxRegistrationRequest":{ + "type":"structure", + "required":["taxRegistrationEntry"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

Your unique account identifier.

" + }, + "taxRegistrationEntry":{ + "shape":"TaxRegistrationEntry", + "documentation":"

Your TRN information that will be stored to the account mentioned in accountId.

" + } + } + }, + "PutTaxRegistrationResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"TaxRegistrationStatus", + "documentation":"

The status of your TRN stored in the system after processing. Based on the validation occurring on the TRN, the status can be Verified, Pending or Rejected.

" + } + } + }, + "RegistrationId":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "RegistrationType":{ + "type":"string", + "enum":[ + "Intra-EU", + "Local" + ] + }, + "RegistryCommercialCode":{ + "type":"string", + "max":8, + "min":8, + "pattern":"^\\d+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

404

" + }, + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The exception thrown when the input doesn't have a resource associated to it.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RomaniaAdditionalInfo":{ + "type":"structure", + "required":["taxRegistrationNumberType"], + "members":{ + "taxRegistrationNumberType":{ + "shape":"TaxRegistrationNumberType", + "documentation":"

The tax registration number type. The value can be TaxRegistrationNumber or LocalRegistrationNumber.

" + } + }, + "documentation":"

Additional tax information to specify for a TRN in Romania.

" + }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^(?=^.{3,63}$)(?!^(\\d+\\.)+\\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])$)$" + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*\\S.*$" + }, + "S3Prefix":{ + "type":"string", + "max":512, + "min":0, + "pattern":"^.*\\S.*$" + }, + "SaudiArabiaAdditionalInfo":{ + "type":"structure", + "members":{ + "taxRegistrationNumberType":{ + "shape":"SaudiArabiaTaxRegistrationNumberType", + "documentation":"

The tax registration number type.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Saudi Arabia.

" + }, + "SaudiArabiaTaxRegistrationNumberType":{ + "type":"string", + "enum":[ + "TaxRegistrationNumber", + "TaxIdentificationNumber", + "CommercialRegistrationNumber" + ] + }, + "SdiAccountId":{ + "type":"string", + "pattern":"^[0-9A-Z]{6,7}$" + }, + "SecondaryTaxId":{ + "type":"string", + "pattern":"^([0-9]{10})$" + }, + "Sector":{ + "type":"string", + "enum":[ + "Business", + "Individual", + "Government" + ] + }, + "Seller":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "SourceS3Location":{ + "type":"structure", + "required":[ + "bucket", + "key" + ], + "members":{ + "bucket":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket where your tax document is located.

" + }, + "key":{ + "shape":"S3Key", + "documentation":"

The object key of your tax document object in Amazon S3.

" + } + }, + "documentation":"

The Amazon S3 bucket in your account where your tax document is located.

" + }, + "SouthKoreaAdditionalInfo":{ + "type":"structure", + "required":[ + "businessRepresentativeName", + "itemOfBusiness", + "lineOfBusiness" + ], + "members":{ + "businessRepresentativeName":{ + "shape":"BusinessRepresentativeName", + "documentation":"

The business legal name based on the most recently uploaded tax registration certificate.

" + }, + "itemOfBusiness":{ + "shape":"ItemOfBusiness", + "documentation":"

Item of business based on the most recently uploaded tax registration certificate.

" + }, + "lineOfBusiness":{ + "shape":"LineOfBusiness", + "documentation":"

Line of business based on the most recently uploaded tax registration certificate.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in South Korea.

" + }, + "SpainAdditionalInfo":{ + "type":"structure", + "required":["registrationType"], + "members":{ + "registrationType":{ + "shape":"RegistrationType", + "documentation":"

The registration type in Spain.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Spain.

" + }, + "State":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^(?!\\s*$)[\\s\\S]+$" + }, + "TaxCode":{ + "type":"string", + "pattern":"^([0-9]{11}|[A-Z]{6}[0-9]{2}[A-Z][0-9]{2}[A-Z][0-9]{3}[A-Z])$" + }, + "TaxDocumentAccessToken":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "TaxDocumentMetadata":{ + "type":"structure", + "required":[ + "taxDocumentAccessToken", + "taxDocumentName" + ], + "members":{ + "taxDocumentAccessToken":{ + "shape":"TaxDocumentAccessToken", + "documentation":"

The tax document access token, which contains information that the Tax Settings API uses to locate the tax document.

If you update your tax registration, the existing taxDocumentAccessToken won't be valid. To get the latest token, call the GetTaxRegistration or ListTaxRegistrations API operation. This token is valid for 24 hours.

" + }, + "taxDocumentName":{ + "shape":"TaxDocumentName", + "documentation":"

The name of your tax document.

" + } + }, + "documentation":"

The metadata for your tax document.

" + }, + "TaxDocumentMetadatas":{ + "type":"list", + "member":{"shape":"TaxDocumentMetadata"}, + "max":5, + "min":1 + }, + "TaxDocumentName":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "TaxInheritanceDetails":{ + "type":"structure", + "members":{ + "inheritanceObtainedReason":{ + "shape":"InheritanceObtainedReason", + "documentation":"

Tax inheritance reason information associated with the account.

" + }, + "parentEntityId":{ + "shape":"AccountId", + "documentation":"

Tax inheritance parent account information associated with the account.

" + } + }, + "documentation":"

Tax inheritance information associated with the account.

" + }, + "TaxOffice":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, + "TaxRegistration":{ + "type":"structure", + "required":[ + "legalAddress", + "legalName", + "registrationId", + "registrationType", + "status" + ], + "members":{ + "additionalTaxInformation":{ + "shape":"AdditionalInfoResponse", + "documentation":"

Additional tax information associated with your TRN.

" + }, + "certifiedEmailId":{ + "shape":"CertifiedEmailId", + "documentation":"

The email address to receive VAT invoices.

" + }, + "legalAddress":{ + "shape":"Address", + "documentation":"

The legal address associated with your TRN registration.

" + }, + "legalName":{ + "shape":"LegalName", + "documentation":"

The legal name associated with your TRN registration.

" + }, + "registrationId":{ + "shape":"RegistrationId", + "documentation":"

Your tax registration unique identifier.

" + }, + "registrationType":{ + "shape":"TaxRegistrationType", + "documentation":"

Type of your tax registration. This can be either VAT or GST.

" + }, + "sector":{ + "shape":"Sector", + "documentation":"

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government. Note that certain values may not be applicable for the request country. Please refer to the country-specific information in the API document.

" + }, + "status":{ + "shape":"TaxRegistrationStatus", + "documentation":"

The status of your TRN. This can be either Verified, Pending, Deleted, or Rejected.

" + }, + "taxDocumentMetadatas":{ + "shape":"TaxDocumentMetadatas", + "documentation":"

The metadata for your tax document.

" + } + }, + "documentation":"

Your TRN information.

", + "sensitive":true + }, + "TaxRegistrationDocument":{ + "type":"structure", + "required":["s3Location"], + "members":{ + "s3Location":{ + "shape":"SourceS3Location", + "documentation":"

The Amazon S3 location where your tax registration document is stored.

" + } + }, + "documentation":"

Tax registration document information.

" + }, + "TaxRegistrationDocuments":{ + "type":"list", + "member":{"shape":"TaxRegistrationDocument"}, + "max":5, + "min":1 + }, + "TaxRegistrationEntry":{ + "type":"structure", + "required":[ + "registrationId", + "registrationType" + ], + "members":{ + "additionalTaxInformation":{ + "shape":"AdditionalInfoRequest", + "documentation":"

Additional tax information associated with your TRN. You only need to specify this parameter if Amazon Web Services collects any additional information for your country within AdditionalInfoRequest.

" + }, + "certifiedEmailId":{ + "shape":"CertifiedEmailId", + "documentation":"

The email address to receive VAT invoices.

" + }, + "legalAddress":{ + "shape":"Address", + "documentation":"

The legal address associated with your TRN.

If you're setting a TRN in Brazil for the CNPJ tax type, you don't need to specify the legal address.

For TRNs in other countries and for CPF tax types in Brazil, you must specify the legal address.

" + }, + "legalName":{ + "shape":"LegalName", + "documentation":"

The legal name associated with your TRN.

If you're setting a TRN in Brazil, you don't need to specify the legal name. For TRNs in other countries, you must specify the legal name.

" + }, + "registrationId":{ + "shape":"RegistrationId", + "documentation":"

Your tax registration unique identifier.

" + }, + "registrationType":{ + "shape":"TaxRegistrationType", + "documentation":"

Your tax registration type. This can be either VAT or GST.

" + }, + "sector":{ + "shape":"Sector", + "documentation":"

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government. Note that certain values may not be applicable for the request country. Please refer to the country-specific information in the API document.

" + }, + "verificationDetails":{ + "shape":"VerificationDetails", + "documentation":"

Additional details needed to verify your TRN information in Brazil. You only need to specify this parameter when you set a TRN in Brazil that is the CPF tax type.

Don't specify this parameter to set a TRN in Brazil of the CNPJ tax type or to set a TRN for another country.

" + } + }, + "documentation":"

The TRN information you provide when you add a new TRN, or update.

", + "sensitive":true + }, + "TaxRegistrationNumberType":{ + "type":"string", + "enum":[ + "TaxRegistrationNumber", + "LocalRegistrationNumber" + ] + }, + "TaxRegistrationStatus":{ + "type":"string", + "enum":[ + "Verified", + "Pending", + "Deleted", + "Rejected" + ] + }, + "TaxRegistrationType":{ + "type":"string", + "enum":[ + "VAT", + "GST", + "CPF", + "CNPJ", + "SST" + ] + }, + "TaxRegistrationWithJurisdiction":{ + "type":"structure", + "required":[ + "jurisdiction", + "legalName", + "registrationId", + "registrationType", + "status" + ], + "members":{ + "additionalTaxInformation":{ + "shape":"AdditionalInfoResponse", + "documentation":"

Additional tax information associated with your TRN.

" + }, + "certifiedEmailId":{ + "shape":"CertifiedEmailId", + "documentation":"

The email address to receive VAT invoices.

" + }, + "jurisdiction":{ + "shape":"Jurisdiction", + "documentation":"

The jurisdiction associated with your TRN information.

" + }, + "legalName":{ + "shape":"LegalName", + "documentation":"

The legal name associated with your TRN information.

" + }, + "registrationId":{ + "shape":"RegistrationId", + "documentation":"

Your tax registration unique identifier.

" + }, + "registrationType":{ + "shape":"TaxRegistrationType", + "documentation":"

The type of your tax registration. This can be either VAT or GST.

" + }, + "sector":{ + "shape":"Sector", + "documentation":"

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government. Note that certain values may not be applicable for the request country. Please refer to the country-specific information in the API document.

" + }, + "status":{ + "shape":"TaxRegistrationStatus", + "documentation":"

The status of your TRN. This can be either Verified, Pending, Deleted, or Rejected.

" + }, + "taxDocumentMetadatas":{ + "shape":"TaxDocumentMetadatas", + "documentation":"

The metadata for your tax document.

" + } + }, + "documentation":"

Your TRN information with jurisdiction details. This doesn't contain the full legal address associated with the TRN information.

", + "sensitive":true + }, + "TurkeyAdditionalInfo":{ + "type":"structure", + "members":{ + "industries":{ + "shape":"Industries", + "documentation":"

The industry information that tells the Tax Settings API if you're subject to additional withholding taxes. This information is required for business-to-business (B2B) customers. This information is conditionally mandatory for B2B customers who are subject to KDV tax.

" + }, + "kepEmailId":{ + "shape":"KepEmailId", + "documentation":"

The Registered Electronic Mail (REM) that is used to send notarized communication. This parameter is optional for business-to-business (B2B) and business-to-government (B2G) customers. It's not required for business-to-consumer (B2C) customers.

" + }, + "secondaryTaxId":{ + "shape":"SecondaryTaxId", + "documentation":"

Secondary tax ID (“harcama birimi VKN'si”). If one isn't provided, we will use your VKN as the secondary ID.

" + }, + "taxOffice":{ + "shape":"TaxOffice", + "documentation":"

The tax office where you're registered. You can enter this information as a string. The Tax Settings API will add this information to your invoice. This parameter is required for business-to-business (B2B) and business-to-government customers. It's not required for business-to-consumer (B2C) customers.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Turkey.

" + }, + "UkraineAdditionalInfo":{ + "type":"structure", + "required":["ukraineTrnType"], + "members":{ + "ukraineTrnType":{ + "shape":"UkraineTrnType", + "documentation":"

The tax registration type.

" + } + }, + "documentation":"

Additional tax information associated with your TRN in Ukraine.

" + }, + "UkraineTrnType":{ + "type":"string", + "enum":[ + "Business", + "Individual" + ] + }, + "ValidationException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"ValidationExceptionErrorCode", + "documentation":"

400

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

400

" + }, + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The exception when the input doesn't pass validation for at least one of the input parameters.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionErrorCode":{ + "type":"string", + "enum":[ + "MalformedToken", + "ExpiredToken", + "InvalidToken", + "FieldValidationFailed", + "MissingInput" + ] + }, + "ValidationExceptionField":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"FieldName", + "documentation":"

The name of the parameter that caused a ValidationException error.

" + } + }, + "documentation":"

The information about the specified parameter in the request that caused an error.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "VerificationDetails":{ + "type":"structure", + "members":{ + "dateOfBirth":{ + "shape":"DateOfBirth", + "documentation":"

Date of birth to verify your submitted TRN. Use the YYYY-MM-DD format.

" + }, + "taxRegistrationDocuments":{ + "shape":"TaxRegistrationDocuments", + "documentation":"

The tax registration document, which is required for specific countries such as Bangladesh, Kenya, South Korea and Spain.

" + } + }, + "documentation":"

Required information to verify your TRN.

" + } + }, + "documentation":"

You can use the tax setting API to programmatically set, modify, and delete the tax registration number (TRN), associated business legal name, and address (Collectively referred to as \"TRN information\"). You can also programmatically view TRN information and tax addresses (\"Tax profiles\").

You can use this API to automate your TRN information settings instead of manually using the console.

Service Endpoint

  • https://tax.us-east-1.amazonaws.com

" +} diff --git a/botocore/data/timestream-query/2018-11-01/service-2.json b/botocore/data/timestream-query/2018-11-01/service-2.json index 51a7a97cdf..2d8acf6c5c 100644 --- a/botocore/data/timestream-query/2018-11-01/service-2.json +++ b/botocore/data/timestream-query/2018-11-01/service-2.json @@ -12,7 +12,8 @@ "signatureVersion":"v4", "signingName":"timestream", "targetPrefix":"Timestream_20181101", - "uid":"timestream-query-2018-11-01" + "uid":"timestream-query-2018-11-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelQuery":{ @@ -1581,7 +1582,7 @@ }, "QueryPricingModel":{ "shape":"QueryPricingModel", - "documentation":"

The pricing model for queries in an account.

" + "documentation":"

The pricing model for queries in an account.

The QueryPricingModel parameter is used by several Timestream operations; however, the UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS.

" } } }, diff --git a/botocore/data/tnb/2008-10-21/endpoint-rule-set-1.json b/botocore/data/tnb/2008-10-21/endpoint-rule-set-1.json index bdbdea94cf..836b7d68d1 100644 --- a/botocore/data/tnb/2008-10-21/endpoint-rule-set-1.json +++ b/botocore/data/tnb/2008-10-21/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": 
"tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff --git a/botocore/data/tnb/2008-10-21/service-2.json b/botocore/data/tnb/2008-10-21/service-2.json index 3c410d46c4..617899a976 100644 --- a/botocore/data/tnb/2008-10-21/service-2.json +++ b/botocore/data/tnb/2008-10-21/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"tnb", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Telco Network Builder", "serviceId":"tnb", "signatureVersion":"v4", "signingName":"tnb", - "uid":"tnb-2008-10-21" + "uid":"tnb-2008-10-21", + "auth":["aws.auth#sigv4"] }, "operations":{ "CancelSolNetworkOperation":{ @@ -154,7 +156,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets the details of a network function instance, including the instantation state and metadata from the function package descriptor in the network function package.

A network function instance is a function in a function package .

" + "documentation":"

Gets the details of a network function instance, including the instantiation state and metadata from the function package descriptor in the network function package.

A network function instance is a function in a function package .

" }, "GetSolFunctionPackage":{ "name":"GetSolFunctionPackage", @@ -552,7 +554,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Update a network instance.

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

" + "documentation":"

Update a network instance.

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

Choose the updateType parameter to target the necessary update of the network instance.

" }, "UpdateSolNetworkPackage":{ "name":"UpdateSolNetworkPackage", @@ -1236,9 +1238,21 @@ "shape":"SyntheticTimestamp_date_time", "documentation":"

The date that the resource was created.

" }, + "instantiateMetadata":{ + "shape":"InstantiateMetadata", + "documentation":"

Metadata related to the network operation occurrence for network instantiation. This is populated only if the lcmOperationType is INSTANTIATE.

" + }, "lastModified":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The date that the resource was last modified.

" + }, + "modifyVnfInfoMetadata":{ + "shape":"ModifyVnfInfoMetadata", + "documentation":"

Metadata related to the network operation occurrence for network function updates in a network instance. This is populated only if the lcmOperationType is UPDATE and the updateType is MODIFY_VNF_INFORMATION.

" + }, + "updateNsMetadata":{ + "shape":"UpdateNsMetadata", + "documentation":"

Metadata related to the network operation occurrence for network instance updates. This is populated only if the lcmOperationType is UPDATE and the updateType is UPDATE_NS.

" } }, "documentation":"

Metadata related to a network operation occurrence.

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

" @@ -1282,6 +1296,10 @@ "tasks":{ "shape":"GetSolNetworkOperationTasksList", "documentation":"

All tasks associated with this operation occurrence.

" + }, + "updateType":{ + "shape":"UpdateSolNetworkType", + "documentation":"

Type of the update. Only present if the network operation lcmOperationType is UPDATE.

" } } }, @@ -1522,6 +1540,21 @@ }, "documentation":"

The metadata of a network function.

A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, update, and delete) can be performed.

" }, + "InstantiateMetadata":{ + "type":"structure", + "required":["nsdInfoId"], + "members":{ + "additionalParamsForNs":{ + "shape":"Document", + "documentation":"

The configurable properties used during instantiation.

" + }, + "nsdInfoId":{ + "shape":"NsdInfoId", + "documentation":"

The network service descriptor used for instantiating the network instance.

" + } + }, + "documentation":"

Metadata related to the configuration properties used during instantiation of the network instance.

" + }, "InstantiateSolNetworkInstanceInput":{ "type":"structure", "required":["nsInstanceId"], @@ -1544,7 +1577,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -1558,7 +1591,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -1948,6 +1981,10 @@ "operationState":{ "shape":"NsLcmOperationState", "documentation":"

The state of the network operation.

" + }, + "updateType":{ + "shape":"UpdateSolNetworkType", + "documentation":"

Type of the update. Only present if the network operation lcmOperationType is UPDATE.

" } }, "documentation":"

Information parameters for a network operation.

" @@ -1966,6 +2003,12 @@ "documentation":"

The token for the next page of results.

", "location":"querystring", "locationName":"nextpage_opaque_marker" + }, + "nsInstanceId":{ + "shape":"NsInstanceId", + "documentation":"

Network instance id filter, to retrieve network operations associated to a network instance.

", + "location":"querystring", + "locationName":"nsInstanceId" } } }, @@ -1989,6 +2032,14 @@ "lastModified":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The date that the resource was last modified.

" + }, + "nsdInfoId":{ + "shape":"NsdInfoId", + "documentation":"

The network service descriptor id used for the operation.

Only present if the updateType is UPDATE_NS.

" + }, + "vnfInstanceId":{ + "shape":"VnfInstanceId", + "documentation":"

The network function id used for the operation.

Only present if the updateType is MODIFY_VNF_INFO.

" } }, "documentation":"

Metadata related to a network operation.

A network operation is any operation that is done to your network, such as network instance instantiation or termination.

" @@ -2153,6 +2204,24 @@ } } }, + "ModifyVnfInfoMetadata":{ + "type":"structure", + "required":[ + "vnfConfigurableProperties", + "vnfInstanceId" + ], + "members":{ + "vnfConfigurableProperties":{ + "shape":"Document", + "documentation":"

The configurable properties used during update of the network function instance.

" + }, + "vnfInstanceId":{ + "shape":"VnfInstanceId", + "documentation":"

The network function instance that was updated in the network instance.

" + } + }, + "documentation":"

Metadata related to the configuration properties used during update of a specific network function in a network instance.

" + }, "NetworkArtifactMeta":{ "type":"structure", "members":{ @@ -2194,10 +2263,13 @@ "enum":[ "INSTANTIATED", "NOT_INSTANTIATED", + "UPDATED", "IMPAIRED", + "UPDATE_FAILED", "STOPPED", "DELETED", "INSTANTIATE_IN_PROGRESS", + "INTENT_TO_UPDATE_IN_PROGRESS", "UPDATE_IN_PROGRESS", "TERMINATE_IN_PROGRESS" ] @@ -2289,7 +2361,7 @@ "locationName":"Content-Type" }, "file":{ - "shape":"Blob", + "shape":"SensitiveBlob", "documentation":"

Function package file.

" }, "vnfPkgId":{ @@ -2359,7 +2431,7 @@ "locationName":"Content-Type" }, "file":{ - "shape":"Blob", + "shape":"SensitiveBlob", "documentation":"

Network package file.

" }, "nsdInfoId":{ @@ -2433,6 +2505,10 @@ }, "exception":true }, + "SensitiveBlob":{ + "type":"blob", + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["message"], @@ -2531,7 +2607,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -2544,7 +2620,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -2601,6 +2677,21 @@ "members":{ } }, + "UpdateNsMetadata":{ + "type":"structure", + "required":["nsdInfoId"], + "members":{ + "additionalParamsForNs":{ + "shape":"Document", + "documentation":"

The configurable properties used during update.

" + }, + "nsdInfoId":{ + "shape":"NsdInfoId", + "documentation":"

The network service descriptor used for updating the network instance.

" + } + }, + "documentation":"

Metadata related to the configuration properties used during update of a network instance.

" + }, "UpdateSolFunctionPackageInput":{ "type":"structure", "required":[ @@ -2639,7 +2730,7 @@ "members":{ "modifyVnfInfoData":{ "shape":"UpdateSolNetworkModify", - "documentation":"

Identifies the network function information parameters and/or the configurable properties of the network function to be modified.

" + "documentation":"

Identifies the network function information parameters and/or the configurable properties of the network function to be modified.

Include this property only if the update type is MODIFY_VNF_INFORMATION.

" }, "nsInstanceId":{ "shape":"NsInstanceId", @@ -2649,11 +2740,15 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + }, + "updateNs":{ + "shape":"UpdateSolNetworkServiceData", + "documentation":"

Identifies the network service descriptor and the configurable properties of the descriptor, to be used for the update.

Include this property only if the update type is UPDATE_NS.

" }, "updateType":{ "shape":"UpdateSolNetworkType", - "documentation":"

The type of update.

" + "documentation":"

The type of update.

  • Use the MODIFY_VNF_INFORMATION update type, to update a specific network function configuration, in the network instance.

  • Use the UPDATE_NS update type, to update the network instance to a new network service descriptor.

" } } }, @@ -2666,7 +2761,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are transferred to the network operation that is created. Use tags to search and filter your resources or track your Amazon Web Services costs.

" + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. When you use this API, the tags are only applied to the network operation that is created. These tags are not applied to the network instance. Use tags to search and filter your resources or track your Amazon Web Services costs.

" } } }, @@ -2717,9 +2812,27 @@ } } }, + "UpdateSolNetworkServiceData":{ + "type":"structure", + "required":["nsdInfoId"], + "members":{ + "additionalParamsForNs":{ + "shape":"Document", + "documentation":"

Values for the configurable properties declared in the network service descriptor.

" + }, + "nsdInfoId":{ + "shape":"NsdInfoId", + "documentation":"

ID of the network service descriptor.

" + } + }, + "documentation":"

Information parameters and/or the configurable properties for a network descriptor used for update.

" + }, "UpdateSolNetworkType":{ "type":"string", - "enum":["MODIFY_VNF_INFORMATION"] + "enum":[ + "MODIFY_VNF_INFORMATION", + "UPDATE_NS" + ] }, "UsageState":{ "type":"string", @@ -2742,7 +2855,7 @@ "locationName":"Content-Type" }, "file":{ - "shape":"Blob", + "shape":"SensitiveBlob", "documentation":"

Function package file.

" }, "vnfPkgId":{ @@ -2812,7 +2925,7 @@ "locationName":"Content-Type" }, "file":{ - "shape":"Blob", + "shape":"SensitiveBlob", "documentation":"

Network package file.

" }, "nsdInfoId":{ diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index fca73962e9..3422efdcb6 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"transfer", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"AWS Transfer", "serviceFullName":"AWS Transfer Family", "serviceId":"Transfer", @@ -118,7 +119,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ResourceExistsException"} ], - "documentation":"

Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's Identity and Access Management (IAM) role. You can also optionally add a session policy, and assign metadata with tags that can be used to group and search for users.

" + "documentation":"

Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's Identity and Access Management (IAM) role. You can also optionally add a session policy, and assign metadata with tags that can be used to group and search for users.

", + "idempotent":true }, "CreateWorkflow":{ "name":"CreateWorkflow", @@ -166,7 +168,8 @@ {"shape":"InternalServiceError"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Delete the agreement that's specified in the provided AgreementId.

" + "documentation":"

Delete the agreement that's specified in the provided AgreementId.

", + "idempotent":true }, "DeleteCertificate":{ "name":"DeleteCertificate", @@ -181,7 +184,8 @@ {"shape":"InternalServiceError"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Deletes the certificate that's specified in the CertificateId parameter.

" + "documentation":"

Deletes the certificate that's specified in the CertificateId parameter.

", + "idempotent":true }, "DeleteConnector":{ "name":"DeleteConnector", @@ -196,7 +200,8 @@ {"shape":"InternalServiceError"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Deletes the connector that's specified in the provided ConnectorId.

" + "documentation":"

Deletes the connector that's specified in the provided ConnectorId.

", + "idempotent":true }, "DeleteHostKey":{ "name":"DeleteHostKey", @@ -227,7 +232,8 @@ {"shape":"InternalServiceError"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Deletes the profile that's specified in the ProfileId parameter.

" + "documentation":"

Deletes the profile that's specified in the ProfileId parameter.

", + "idempotent":true }, "DeleteServer":{ "name":"DeleteServer", @@ -243,7 +249,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the file transfer protocol-enabled server that you specify.

No response returns from this operation.

" + "documentation":"

Deletes the file transfer protocol-enabled server that you specify.

No response returns from this operation.

", + "idempotent":true }, "DeleteSshPublicKey":{ "name":"DeleteSshPublicKey", @@ -274,7 +281,8 @@ {"shape":"InternalServiceError"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Deletes the user belonging to a file transfer protocol-enabled server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

" + "documentation":"

Deletes the user belonging to a file transfer protocol-enabled server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

", + "idempotent":true }, "DeleteWorkflow":{ "name":"DeleteWorkflow", @@ -290,7 +298,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the specified workflow.

" + "documentation":"

Deletes the specified workflow.

", + "idempotent":true }, "DescribeAccess":{ "name":"DescribeAccess", @@ -2188,7 +2197,7 @@ }, "Usage":{ "shape":"CertificateUsageType", - "documentation":"

Specifies whether this certificate is used for signing or encryption.

" + "documentation":"

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages

  • ENCRYPTION: For encrypting AS2 messages

  • TLS: For securing AS2 communications sent over HTTPS

" }, "Status":{ "shape":"CertificateStatusType", @@ -2446,7 +2455,7 @@ }, "Domain":{ "shape":"Domain", - "documentation":"

Specifies the domain of the storage system that is used for file transfers.

" + "documentation":"

Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.

" }, "EndpointDetails":{ "shape":"EndpointDetails", @@ -2935,7 +2944,7 @@ "members":{ "Usage":{ "shape":"CertificateUsageType", - "documentation":"

Specifies whether this certificate is used for signing or encryption.

" + "documentation":"

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages

  • ENCRYPTION: For encrypting AS2 messages

  • TLS: For securing AS2 communications sent over HTTPS

" }, "Certificate":{ "shape":"CertificateBodyType", @@ -3572,7 +3581,7 @@ }, "Usage":{ "shape":"CertificateUsageType", - "documentation":"

Specifies whether this certificate is used for signing or encryption.

" + "documentation":"

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages

  • ENCRYPTION: For encrypting AS2 messages

  • TLS: For securing AS2 communications sent over HTTPS

" }, "Status":{ "shape":"CertificateStatusType", @@ -3720,7 +3729,7 @@ }, "Domain":{ "shape":"Domain", - "documentation":"

Specifies the domain of the storage system that is used for file transfers.

" + "documentation":"

Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", @@ -4286,7 +4295,8 @@ }, "ServiceManagedEgressIpAddresses":{ "type":"list", - "member":{"shape":"ServiceManagedEgressIpAddress"} + "member":{"shape":"ServiceManagedEgressIpAddress"}, + "documentation":"

The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.

" }, "ServiceMetadata":{ "type":"structure", @@ -4403,7 +4413,8 @@ "SshPublicKeyBody":{ "type":"string", "max":2048, - "min":0 + "min":0, + "pattern":"\\s*(ssh|ecdsa)-[a-z0-9-]+[ \\t]+(([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{1,3})?(={0,3})?)(\\s*|[ \\t]+[\\S \\t]*\\s*)" }, "SshPublicKeyCount":{ "type":"integer", diff --git a/botocore/data/verifiedpermissions/2021-12-01/service-2.json b/botocore/data/verifiedpermissions/2021-12-01/service-2.json index 0f70e2ca26..4c63db0d37 100644 --- a/botocore/data/verifiedpermissions/2021-12-01/service-2.json +++ b/botocore/data/verifiedpermissions/2021-12-01/service-2.json @@ -65,7 +65,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

To reference a user from this identity source in your Cedar policies, use the following syntax.

IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "documentation":"

Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP).

After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes.

Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store.

To reference a user from this identity source in your Cedar policies, refer to the following syntax examples.

  • Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111.

  • OpenID Connect (OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222.

Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", "idempotent":true }, "CreatePolicy":{ @@ -318,7 +318,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

At this time, Verified Permissions accepts tokens from only Amazon Cognito.

Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

" + "documentation":"

Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

At this time, Verified Permissions accepts tokens from only Amazon Cognito.

Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store.

" }, "ListIdentitySources":{ "name":"ListIdentitySources", @@ -423,7 +423,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "documentation":"

Updates the specified identity source to use a new identity provider (IdP), or to change the mapping of identities from the IdP to a different principal entity type.

Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", "idempotent":true }, "UpdatePolicy":{ @@ -562,6 +562,17 @@ "documentation":"

The value of an attribute.

Contains information about the runtime context for a request for which an authorization decision is made.

This data type is used as a member of the ContextDefinition structure which is uses as a request parameter for the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken operations.

", "union":true }, + "Audience":{ + "type":"string", + "max":255, + "min":1 + }, + "Audiences":{ + "type":"list", + "member":{"shape":"Audience"}, + "max":255, + "min":1 + }, "BatchIsAuthorizedInput":{ "type":"structure", "required":[ @@ -759,6 +770,11 @@ "box":true, "sensitive":true }, + "Claim":{ + "type":"string", + "min":1, + "sensitive":true + }, "ClientId":{ "type":"string", "max":255, @@ -820,7 +836,7 @@ "documentation":"

The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source.

" } }, - "documentation":"

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used as an identity provider for Verified Permissions.

This data type is used as a field that is part of an Configuration structure that is used as a parameter to CreateIdentitySource.

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}

" + "documentation":"

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used as an identity provider for Verified Permissions.

This data type is part of a Configuration structure that is used as a parameter to CreateIdentitySource.

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}

" }, "CognitoUserPoolConfigurationDetail":{ "type":"structure", @@ -882,9 +898,13 @@ "cognitoUserPoolConfiguration":{ "shape":"CognitoUserPoolConfiguration", "documentation":"

Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool and one or more application client IDs.

Example: \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}

" + }, + "openIdConnectConfiguration":{ + "shape":"OpenIdConnectConfiguration", + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}

" } }, - "documentation":"

Contains configuration information used when creating a new identity source.

At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.

Specifies a userPoolArn, a groupConfiguration, and a ClientId.

This data type is used as a request parameter for the CreateIdentitySource operation.

", + "documentation":"

Contains configuration information used when creating a new identity source.

This data type is used as a request parameter for the CreateIdentitySource operation.

", "union":true }, "ConfigurationDetail":{ @@ -893,6 +913,10 @@ "cognitoUserPoolConfiguration":{ "shape":"CognitoUserPoolConfigurationDetail", "documentation":"

Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool, the policy store entity that you want to assign to user groups, and one or more application client IDs.

Example: \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}

" + }, + "openIdConnectConfiguration":{ + "shape":"OpenIdConnectConfigurationDetail", + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}

" } }, "documentation":"

Contains configuration information about an identity source.

This data type is a response parameter to the GetIdentitySource operation.

", @@ -904,6 +928,10 @@ "cognitoUserPoolConfiguration":{ "shape":"CognitoUserPoolConfigurationItem", "documentation":"

Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool, the policy store entity that you want to assign to user groups, and one or more application client IDs.

Example: \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}

" + }, + "openIdConnectConfiguration":{ + "shape":"OpenIdConnectConfigurationItem", + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}

" } }, "documentation":"

Contains configuration information about an identity source.

This data type is a response parameter to the ListIdentitySources operation.

", @@ -939,7 +967,8 @@ "ContextMap":{ "type":"map", "key":{"shape":"String"}, - "value":{"shape":"AttributeValue"} + "value":{"shape":"AttributeValue"}, + "sensitive":true }, "CreateIdentitySourceInput":{ "type":"structure", @@ -959,7 +988,7 @@ }, "configuration":{ "shape":"Configuration", - "documentation":"

Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.

You must specify a UserPoolArn, and optionally, a ClientId.

" + "documentation":"

Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

" }, "principalEntityType":{ "shape":"PrincipalEntityType", @@ -1295,6 +1324,12 @@ "pattern":".*", "sensitive":true }, + "EntityIdPrefix":{ + "type":"string", + "max":100, + "min":1, + "sensitive":true + }, "EntityIdentifier":{ "type":"structure", "required":[ @@ -1327,7 +1362,7 @@ }, "parents":{ "shape":"ParentList", - "documentation":"

The parents in the hierarchy that contains the entity.

" + "documentation":"

The parent entities in the hierarchy that contains the entity. A principal or resource entity can be defined with at most 99 transitive parents per authorization request.

A transitive parent is an entity in the hierarchy of entities including all direct parents, and parents of parents. For example, a user can be a member of 91 groups if one of those groups is a member of eight groups, for a total of 100: one entity, 91 entity parents, and eight parents of parents.

" } }, "documentation":"

Contains information about an entity that can be referenced in a Cedar policy.

This data type is used as one of the fields in the EntitiesDefinition structure.

{ \"identifier\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"attributes\": {}, \"parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

" @@ -2081,15 +2116,274 @@ "min":1, "pattern":"[A-Za-z0-9-_=+/\\.]*" }, + "OpenIdConnectAccessTokenConfiguration":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC access tokens. For example, sub.

" + }, + "audiences":{ + "shape":"Audiences", + "documentation":"

The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling access token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelection structure, which is a parameter of CreateIdentitySource.

" + }, + "OpenIdConnectAccessTokenConfigurationDetail":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC access tokens. For example, sub.

" + }, + "audiences":{ + "shape":"Audiences", + "documentation":"

The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling access token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelectionDetail structure, which is a parameter of GetIdentitySource.

" + }, + "OpenIdConnectAccessTokenConfigurationItem":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC access tokens. For example, sub.

" + }, + "audiences":{ + "shape":"Audiences", + "documentation":"

The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling access token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelectionItem structure, which is a parameter of ListIdentitySources.

" + }, + "OpenIdConnectConfiguration":{ + "type":"structure", + "required":[ + "issuer", + "tokenSelection" + ], + "members":{ + "issuer":{ + "shape":"Issuer", + "documentation":"

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration.

" + }, + "entityIdPrefix":{ + "shape":"EntityIdPrefix", + "documentation":"

A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos.

" + }, + "groupConfiguration":{ + "shape":"OpenIdConnectGroupConfiguration", + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

" + }, + "tokenSelection":{ + "shape":"OpenIdConnectTokenSelection", + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

" + } + }, + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

This data type is part of a Configuration structure, which is a parameter to CreateIdentitySource.

" + }, + "OpenIdConnectConfigurationDetail":{ + "type":"structure", + "required":[ + "issuer", + "tokenSelection" + ], + "members":{ + "issuer":{ + "shape":"Issuer", + "documentation":"

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration.

" + }, + "entityIdPrefix":{ + "shape":"EntityIdPrefix", + "documentation":"

A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos.

" + }, + "groupConfiguration":{ + "shape":"OpenIdConnectGroupConfigurationDetail", + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

" + }, + "tokenSelection":{ + "shape":"OpenIdConnectTokenSelectionDetail", + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

" + } + }, + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

This data type is part of a ConfigurationDetail structure, which is a parameter to GetIdentitySource.

" + }, + "OpenIdConnectConfigurationItem":{ + "type":"structure", + "required":[ + "issuer", + "tokenSelection" + ], + "members":{ + "issuer":{ + "shape":"Issuer", + "documentation":"

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration.

" + }, + "entityIdPrefix":{ + "shape":"EntityIdPrefix", + "documentation":"

A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos.

" + }, + "groupConfiguration":{ + "shape":"OpenIdConnectGroupConfigurationItem", + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

" + }, + "tokenSelection":{ + "shape":"OpenIdConnectTokenSelectionItem", + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

" + } + }, + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

This data type is part of a ConfigurationItem structure, which is a parameter to ListIdentitySources.

" + }, + "OpenIdConnectGroupConfiguration":{ + "type":"structure", + "required":[ + "groupClaim", + "groupEntityType" + ], + "members":{ + "groupClaim":{ + "shape":"Claim", + "documentation":"

The token claim that you want Verified Permissions to interpret as group membership. For example, groups.

" + }, + "groupEntityType":{ + "shape":"GroupEntityType", + "documentation":"

The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member.

" + } + }, + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

This data type is part of a OpenIdConnectConfiguration structure, which is a parameter of CreateIdentitySource.

" + }, + "OpenIdConnectGroupConfigurationDetail":{ + "type":"structure", + "required":[ + "groupClaim", + "groupEntityType" + ], + "members":{ + "groupClaim":{ + "shape":"Claim", + "documentation":"

The token claim that you want Verified Permissions to interpret as group membership. For example, groups.

" + }, + "groupEntityType":{ + "shape":"GroupEntityType", + "documentation":"

The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member.

" + } + }, + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

This data type is part of a OpenIdConnectConfigurationDetail structure, which is a parameter of GetIdentitySource.

" + }, + "OpenIdConnectGroupConfigurationItem":{ + "type":"structure", + "required":[ + "groupClaim", + "groupEntityType" + ], + "members":{ + "groupClaim":{ + "shape":"Claim", + "documentation":"

The token claim that you want Verified Permissions to interpret as group membership. For example, groups.

" + }, + "groupEntityType":{ + "shape":"GroupEntityType", + "documentation":"

The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member.

" + } + }, + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

This data type is part of a OpenIdConnectConfigurationItem structure, which is a parameter of ListIdentitySources.

" + }, + "OpenIdConnectIdentityTokenConfiguration":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC identity (ID) tokens. For example, sub.

" + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID) token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelection structure, which is a parameter of CreateIdentitySource.

" + }, + "OpenIdConnectIdentityTokenConfigurationDetail":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC identity (ID) tokens. For example, sub.

" + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID) token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelectionDetail structure, which is a parameter of GetIdentitySource.

" + }, + "OpenIdConnectIdentityTokenConfigurationItem":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC identity (ID) tokens. For example, sub.

" + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID) token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a OpenIdConnectTokenSelectionItem structure, which is a parameter of ListIdentitySources.

" + }, + "OpenIdConnectTokenSelection":{ + "type":"structure", + "members":{ + "accessTokenOnly":{ + "shape":"OpenIdConnectAccessTokenConfiguration", + "documentation":"

The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub.

" + }, + "identityTokenOnly":{ + "shape":"OpenIdConnectIdentityTokenConfiguration", + "documentation":"

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub.

" + } + }, + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

This data type is part of a OpenIdConnectConfiguration structure, which is a parameter of CreateIdentitySource.

", + "union":true + }, + "OpenIdConnectTokenSelectionDetail":{ + "type":"structure", + "members":{ + "accessTokenOnly":{ + "shape":"OpenIdConnectAccessTokenConfigurationDetail", + "documentation":"

The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub.

" + }, + "identityTokenOnly":{ + "shape":"OpenIdConnectIdentityTokenConfigurationDetail", + "documentation":"

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub.

" + } + }, + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

This data type is part of a OpenIdConnectConfigurationDetail structure, which is a parameter of GetIdentitySource.

", + "union":true + }, + "OpenIdConnectTokenSelectionItem":{ + "type":"structure", + "members":{ + "accessTokenOnly":{ + "shape":"OpenIdConnectAccessTokenConfigurationItem", + "documentation":"

The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub.

" + }, + "identityTokenOnly":{ + "shape":"OpenIdConnectIdentityTokenConfigurationItem", + "documentation":"

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub.

" + } + }, + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

This data type is part of a OpenIdConnectConfigurationItem structure, which is a parameter of ListIdentitySources.

", + "union":true + }, "OpenIdIssuer":{ "type":"string", "enum":["COGNITO"] }, "ParentList":{ "type":"list", - "member":{"shape":"EntityIdentifier"}, - "max":100, - "min":0 + "member":{"shape":"EntityIdentifier"} }, "PolicyDefinition":{ "type":"structure", @@ -2674,9 +2968,13 @@ "cognitoUserPoolConfiguration":{ "shape":"UpdateCognitoUserPoolConfiguration", "documentation":"

Contains configuration details of a Amazon Cognito user pool.

" + }, + "openIdConnectConfiguration":{ + "shape":"UpdateOpenIdConnectConfiguration", + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

" } }, - "documentation":"

Contains an updated configuration to replace the configuration in an existing identity source.

At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.

You must specify a userPoolArn, and optionally, a ClientId.

", + "documentation":"

Contains an update to replace the configuration in an existing identity source.

", "union":true }, "UpdateIdentitySourceInput":{ @@ -2732,6 +3030,93 @@ } } }, + "UpdateOpenIdConnectAccessTokenConfiguration":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC access tokens. For example, sub.

" + }, + "audiences":{ + "shape":"Audiences", + "documentation":"

The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling access token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a UpdateOpenIdConnectTokenSelection structure, which is a parameter to UpdateIdentitySource.

" + }, + "UpdateOpenIdConnectConfiguration":{ + "type":"structure", + "required":[ + "issuer", + "tokenSelection" + ], + "members":{ + "issuer":{ + "shape":"Issuer", + "documentation":"

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration.

" + }, + "entityIdPrefix":{ + "shape":"EntityIdPrefix", + "documentation":"

A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos.

" + }, + "groupConfiguration":{ + "shape":"UpdateOpenIdConnectGroupConfiguration", + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

" + }, + "tokenSelection":{ + "shape":"UpdateOpenIdConnectTokenSelection", + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

" + } + }, + "documentation":"

Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details.

This data type is part of a UpdateConfiguration structure, which is a parameter to UpdateIdentitySource.

" + }, + "UpdateOpenIdConnectGroupConfiguration":{ + "type":"structure", + "required":[ + "groupClaim", + "groupEntityType" + ], + "members":{ + "groupClaim":{ + "shape":"Claim", + "documentation":"

The token claim that you want Verified Permissions to interpret as group membership. For example, groups.

" + }, + "groupEntityType":{ + "shape":"GroupEntityType", + "documentation":"

The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member.

" + } + }, + "documentation":"

The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup.

This data type is part of a UpdateOpenIdConnectConfiguration structure, which is a parameter to UpdateIdentitySource.

" + }, + "UpdateOpenIdConnectIdentityTokenConfiguration":{ + "type":"structure", + "members":{ + "principalIdClaim":{ + "shape":"Claim", + "documentation":"

The claim that determines the principal in OIDC identity (ID) tokens. For example, sub.

" + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213.

" + } + }, + "documentation":"

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID) token claims. Contains the claim that you want to identify as the principal in an authorization request, and the values of the aud claim, or audiences, that you want to accept.

This data type is part of a UpdateOpenIdConnectTokenSelection structure, which is a parameter to UpdateIdentitySource.

" + }, + "UpdateOpenIdConnectTokenSelection":{ + "type":"structure", + "members":{ + "accessTokenOnly":{ + "shape":"UpdateOpenIdConnectAccessTokenConfiguration", + "documentation":"

The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub.

" + }, + "identityTokenOnly":{ + "shape":"UpdateOpenIdConnectIdentityTokenConfiguration", + "documentation":"

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub.

" + } + }, + "documentation":"

The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source.

This data type is part of a UpdateOpenIdConnectConfiguration structure, which is a parameter to UpdateIdentitySource.

", + "union":true + }, "UpdatePolicyDefinition":{ "type":"structure", "members":{ diff --git a/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json b/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json index 91d6cda52e..9117fef38a 100644 --- a/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json +++ b/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { 
"conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/waf-regional/2016-11-28/service-2.json b/botocore/data/waf-regional/2016-11-28/service-2.json index bbf6036e1a..71e9730153 100644 --- a/botocore/data/waf-regional/2016-11-28/service-2.json +++ b/botocore/data/waf-regional/2016-11-28/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"waf-regional", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"WAF Regional", "serviceFullName":"AWS WAF Regional", "serviceId":"WAF Regional", "signatureVersion":"v4", "targetPrefix":"AWSWAF_Regional_20161128", - "uid":"waf-regional-2016-11-28" + "uid":"waf-regional-2016-11-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateWebACL":{ diff --git a/botocore/data/waf/2015-08-24/endpoint-rule-set-1.json b/botocore/data/waf/2015-08-24/endpoint-rule-set-1.json index 93b40c194b..21e2168f12 100644 --- a/botocore/data/waf/2015-08-24/endpoint-rule-set-1.json +++ b/botocore/data/waf/2015-08-24/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -235,7 +233,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -270,7 +267,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -281,14 +277,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,14 +300,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ 
@@ -318,11 +314,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -354,7 +352,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -374,7 +371,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -385,14 +381,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -403,9 +401,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/waf/2015-08-24/service-2.json b/botocore/data/waf/2015-08-24/service-2.json index 1f60141e6a..0465b5c86e 100644 --- a/botocore/data/waf/2015-08-24/service-2.json +++ b/botocore/data/waf/2015-08-24/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"waf", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"WAF", "serviceFullName":"AWS WAF", "serviceId":"WAF", "signatureVersion":"v4", "targetPrefix":"AWSWAF_20150824", - "uid":"waf-2015-08-24" + "uid":"waf-2015-08-24", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateByteMatchSet":{ diff --git a/botocore/data/wafv2/2019-07-29/service-2.json b/botocore/data/wafv2/2019-07-29/service-2.json index de1ce009ff..ffe0d4ef94 100644 --- a/botocore/data/wafv2/2019-07-29/service-2.json +++ b/botocore/data/wafv2/2019-07-29/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"wafv2", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"WAFV2", "serviceFullName":"AWS WAFV2", "serviceId":"WAFV2", "signatureVersion":"v4", "targetPrefix":"AWSWAF_20190729", - "uid":"wafv2-2019-07-29" + 
"uid":"wafv2-2019-07-29", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateWebACL":{ @@ -1217,7 +1219,7 @@ "members":{ "SearchString":{ "shape":"SearchString", - "documentation":"

A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes.

Valid values depend on the component that you specify for inspection in FieldToMatch:

  • Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request.

  • UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg.

  • JA3Fingerprint: Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

    You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

  • HeaderOrder: The list of header names to match for. WAF creates a string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.

If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 200 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString.

If you're using the CLI or one of the Amazon Web Services SDKs

The value that you want WAF to search for. The SDK automatically base64 encodes the value.

" + "documentation":"

A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes.

Valid values depend on the component that you specify for inspection in FieldToMatch:

  • Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request.

  • UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg.

  • JA3Fingerprint: Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

    You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

  • HeaderOrder: The list of header names to match for. WAF creates a string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.

If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 200 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString.

If you're using the CLI or one of the Amazon Web Services SDKs

The value that you want WAF to search for. The SDK automatically base64 encodes the value.

" }, "FieldToMatch":{ "shape":"FieldToMatch", @@ -2117,6 +2119,14 @@ "ResourceArn":{ "shape":"ResourceArn", "documentation":"

The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration.

" + }, + "LogType":{ + "shape":"LogType", + "documentation":"

Used to distinguish between various logging options. Currently, there is one option.

Default: WAF_LOGS

" + }, + "LogScope":{ + "shape":"LogScope", + "documentation":"

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" } } }, @@ -2487,10 +2497,10 @@ }, "JA3Fingerprint":{ "shape":"JA3Fingerprint", - "documentation":"

Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.

" + "documentation":"

Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.

" } }, - "documentation":"

Specifies a web request component to be used in a rule match statement or in a logging configuration.

  • In a rule statement, this is the part of the web request that you want WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of the web request, create a separate rule statement for each component.

    Example JSON for a QueryString field to match:

    \"FieldToMatch\": { \"QueryString\": {} }

    Example JSON for a Method field to match specification:

    \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }

  • In a logging configuration, this is used in the RedactedFields property to specify a field to redact from the logging records. For this use case, note the following:

    • Even though all FieldToMatch settings are available, the only valid settings for field redaction are UriPath, QueryString, SingleHeader, and Method.

    • In this documentation, the descriptions of the individual fields talk about specifying the web request component to inspect, but for field redaction, you are specifying the component type to redact from the logs.

" + "documentation":"

Specifies a web request component to be used in a rule match statement or in a logging configuration.

  • In a rule statement, this is the part of the web request that you want WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of the web request, create a separate rule statement for each component.

    Example JSON for a QueryString field to match:

    \"FieldToMatch\": { \"QueryString\": {} }

    Example JSON for a Method field to match specification:

    \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }

  • In a logging configuration, this is used in the RedactedFields property to specify a field to redact from the logging records. For this use case, note the following:

    • Even though all FieldToMatch settings are available, the only valid settings for field redaction are UriPath, QueryString, SingleHeader, and Method.

    • In this documentation, the descriptions of the individual fields talk about specifying the web request component to inspect, but for field redaction, you are specifying the component type to redact from the logs.

    • If you have request sampling enabled, the redacted fields configuration for logging has no impact on sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.

" }, "FieldToMatchData":{ "type":"string", @@ -2735,6 +2745,14 @@ "ResourceArn":{ "shape":"ResourceArn", "documentation":"

The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration.

" + }, + "LogType":{ + "shape":"LogType", + "documentation":"

Used to distinguish between various logging options. Currently, there is one option.

Default: WAF_LOGS

" + }, + "LogScope":{ + "shape":"LogScope", + "documentation":"

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" } } }, @@ -3307,7 +3325,7 @@ "documentation":"

The match status to assign to the web request if the request doesn't have a JA3 fingerprint.

You can specify the following fallback behaviors:

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

" } }, - "documentation":"

Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.

" + "documentation":"

Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.

" }, "JsonBody":{ "type":"structure", @@ -3326,14 +3344,14 @@ }, "InvalidFallbackBehavior":{ "shape":"BodyParsingFallbackBehavior", - "documentation":"

What WAF should do if it fails to completely parse the JSON body. The options are the following:

  • EVALUATE_AS_STRING - Inspect the body as plain text. WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

If you don't provide this setting, WAF parses and evaluates the content only up to the first parsing failure that it encounters.

WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.

WAF parses the JSON in the following examples as two valid key, value pairs:

  • Missing comma: {\"key1\":\"value1\"\"key2\":\"value2\"}

  • Missing colon: {\"key1\":\"value1\",\"key2\"\"value2\"}

  • Extra colons: {\"key1\"::\"value1\",\"key2\"\"value2\"}

" + "documentation":"

What WAF should do if it fails to completely parse the JSON body. The options are the following:

  • EVALUATE_AS_STRING - Inspect the body as plain text. WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

If you don't provide this setting, WAF parses and evaluates the content only up to the first parsing failure that it encounters.

WAF parsing doesn't fully validate the input JSON string, so parsing can succeed even for invalid JSON. When parsing succeeds, WAF doesn't apply the fallback behavior. For more information, see JSON body in the WAF Developer Guide.

" }, "OversizeHandling":{ "shape":"OversizeHandling", "documentation":"

What WAF should do if the body is larger than WAF can inspect.

WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to WAF for inspection.

  • For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).

  • For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.

The options for oversize handling are the following:

  • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

Default: CONTINUE

" } }, - "documentation":"

Inspect the body of the web request as JSON. The body immediately follows the request headers.

This is used to indicate the web request component to inspect, in the FieldToMatch specification.

Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON that result from the matches that you indicate.

Example JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }

" + "documentation":"

Inspect the body of the web request as JSON. The body immediately follows the request headers.

This is used to indicate the web request component to inspect, in the FieldToMatch specification.

Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON that result from the matches that you indicate.

Example JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }

For additional information about this request component option, see JSON body in the WAF Developer Guide.

" }, "JsonMatchPattern":{ "type":"structure", @@ -3610,6 +3628,10 @@ "Limit":{ "shape":"PaginationLimit", "documentation":"

The maximum number of objects that you want WAF to return for this request. If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

" + }, + "LogScope":{ + "shape":"LogScope", + "documentation":"

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" } } }, @@ -3852,6 +3874,17 @@ "max":100, "min":1 }, + "LogScope":{ + "type":"string", + "enum":[ + "CUSTOMER", + "SECURITY_LAKE" + ] + }, + "LogType":{ + "type":"string", + "enum":["WAF_LOGS"] + }, "LoggingConfiguration":{ "type":"structure", "required":[ @@ -3869,7 +3902,7 @@ }, "RedactedFields":{ "shape":"RedactedFields", - "documentation":"

The parts of the request that you want to keep out of the logs.

For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

" + "documentation":"

The parts of the request that you want to keep out of the logs.

For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration.

" }, "ManagedByFirewallManager":{ "shape":"Boolean", @@ -3878,6 +3911,14 @@ "LoggingFilter":{ "shape":"LoggingFilter", "documentation":"

Filtering that specifies which web requests are kept in the logs and which are dropped. You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation.

" + }, + "LogType":{ + "shape":"LogType", + "documentation":"

Used to distinguish between various logging options. Currently, there is one option.

Default: WAF_LOGS

" + }, + "LogScope":{ + "shape":"LogScope", + "documentation":"

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" } }, "documentation":"

Defines an association between logging destinations and a web ACL resource, for logging from WAF. As part of the association, you can specify parts of the standard logging fields to keep out of the logs and you can specify filters so that you log only a subset of the logging records.

You can define one logging destination per web ACL.

You can access information about the traffic that WAF inspects using the following steps:

  1. Create your logging destination. You can use an Amazon CloudWatch Logs log group, an Amazon Simple Storage Service (Amazon S3) bucket, or an Amazon Kinesis Data Firehose.

    The name that you give the destination must start with aws-waf-logs-. Depending on the type of destination, you might need to configure additional settings or permissions.

    For configuration requirements and pricing information for each destination type, see Logging web ACL traffic in the WAF Developer Guide.

  2. Associate your logging destination to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, WAF creates an additional role or policy that is required to write logs to the logging destination. For an Amazon CloudWatch Logs log group, WAF creates a resource policy on the log group. For an Amazon S3 bucket, WAF creates a bucket policy. For an Amazon Kinesis Data Firehose, WAF creates a service-linked role.

For additional information about web ACL logging, see Logging web ACL traffic information in the WAF Developer Guide.

" @@ -6181,7 +6222,7 @@ "members":{ "SampledRequestsEnabled":{ "shape":"Boolean", - "documentation":"

Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

" + "documentation":"

Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.

" }, "CloudWatchMetricsEnabled":{ "shape":"Boolean", diff --git a/botocore/data/workspaces-thin-client/2023-08-22/service-2.json b/botocore/data/workspaces-thin-client/2023-08-22/service-2.json index cf47645846..0442a76e60 100644 --- a/botocore/data/workspaces-thin-client/2023-08-22/service-2.json +++ b/botocore/data/workspaces-thin-client/2023-08-22/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-08-22", + "auth":["aws.auth#sigv4"], "endpointPrefix":"thinclient", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon WorkSpaces Thin Client", "serviceId":"WorkSpaces Thin Client", "signatureVersion":"v4", @@ -427,6 +428,10 @@ "tags":{ "shape":"TagsMap", "documentation":"

A map of the key-value pairs of the tag or tags to assign to the resource.

" + }, + "deviceCreationTags":{ + "shape":"DeviceCreationTagsMap", + "documentation":"

A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment.

" } } }, @@ -636,6 +641,26 @@ }, "documentation":"

Describes a thin client device.

" }, + "DeviceCreationTagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!aws:)[A-Za-z0-9 _=@:.+-/]+" + }, + "DeviceCreationTagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[A-Za-z0-9 _=@:.+-/]+" + }, + "DeviceCreationTagsMap":{ + "type":"map", + "key":{"shape":"DeviceCreationTagKey"}, + "value":{"shape":"DeviceCreationTagValue"}, + "max":50, + "min":0, + "sensitive":true + }, "DeviceId":{ "type":"string", "pattern":"[a-zA-Z0-9]{24}" @@ -810,6 +835,10 @@ "tags":{ "shape":"TagsMap", "documentation":"

The tag keys and optional values for the resource.

" + }, + "deviceCreationTags":{ + "shape":"DeviceCreationTagsMap", + "documentation":"

The tag keys and optional values for the newly created devices for this environment.

" } }, "documentation":"

Describes an environment.

" @@ -1513,6 +1542,10 @@ "desiredSoftwareSetId":{ "shape":"SoftwareSetIdOrEmptyString", "documentation":"

The ID of the software set to apply.

" + }, + "deviceCreationTags":{ + "shape":"DeviceCreationTagsMap", + "documentation":"

A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment.

" } } }, diff --git a/botocore/data/workspaces-web/2020-07-08/service-2.json b/botocore/data/workspaces-web/2020-07-08/service-2.json index 3e85985f70..833cc123cd 100644 --- a/botocore/data/workspaces-web/2020-07-08/service-2.json +++ b/botocore/data/workspaces-web/2020-07-08/service-2.json @@ -86,7 +86,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Associates a trust store with a web portal.

", "idempotent":true @@ -452,7 +453,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates browser settings from a web portal.

", "idempotent":true @@ -471,7 +473,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates IP access settings from a web portal.

", "idempotent":true @@ -490,7 +493,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates network settings from a web portal.

", "idempotent":true @@ -509,7 +513,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates a trust store from a web portal.

", "idempotent":true @@ -528,7 +533,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates user access logging settings from a web portal.

", "idempotent":true @@ -547,7 +553,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Disassociates user settings from a web portal.

", "idempotent":true @@ -1635,6 +1642,10 @@ "portalArn":{ "shape":"ARN", "documentation":"

The ARN of the web portal.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the identity provider resource. A tag is a key-value pair.

" } } }, @@ -1679,7 +1690,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tags to add to the browser settings resource. A tag is a key-value pair.

" + "documentation":"

The tags to add to the IP access settings resource. A tag is a key-value pair.

" } } }, @@ -1878,6 +1889,10 @@ "shape":"keyArn", "documentation":"

The customer managed key used to encrypt sensitive information in the user settings.

" }, + "deepLinkAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

The amount of time that a streaming session remains active after users disconnect.

" @@ -3711,6 +3726,10 @@ "shape":"EnabledType", "documentation":"

Specifies whether the user can copy text from the streaming session to the local device.

" }, + "deepLinkAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

The amount of time that a streaming session remains active after users disconnect.

" @@ -3815,6 +3834,10 @@ "shape":"keyArn", "documentation":"

The customer managed key used to encrypt sensitive information in the user settings.

" }, + "deepLinkAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

The amount of time that a streaming session remains active after users disconnect.

" @@ -3862,6 +3885,10 @@ "shape":"EnabledType", "documentation":"

Specifies whether the user can copy text from the streaming session to the local device.

" }, + "deepLinkAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

The amount of time that a streaming session remains active after users disconnect.

" @@ -3957,5 +3984,5 @@ "pattern":"^arn:[\\w+=\\/,.@-]+:kms:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:key\\/[a-zA-Z0-9-]+$" } }, - "documentation":"

WorkSpaces Web is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Web provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images.

" + "documentation":"

Amazon WorkSpaces Secure Browser is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Secure Browser makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Secure Browser provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images.

" } diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 7608bf3cc0..306e6faafa 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"workspaces", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon WorkSpaces", "serviceId":"WorkSpaces", "signatureVersion":"v4", "targetPrefix":"WorkspacesService", - "uid":"workspaces-2015-04-08" + "uid":"workspaces-2015-04-08", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptAccountLinkInvitation":{ @@ -291,7 +293,25 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

  • The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

  • You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles.

  • User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core.

" + "documentation":"

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

  • The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

  • You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles.

  • User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core.

  • Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?

" + }, + "CreateWorkspacesPool":{ + "name":"CreateWorkspacesPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkspacesPoolRequest"}, + "output":{"shape":"CreateWorkspacesPoolResult"}, + "errors":[ + {"shape":"ResourceLimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a pool of WorkSpaces.

" }, "DeleteAccountLinkInvitation":{ "name":"DeleteAccountLinkInvitation", @@ -759,6 +779,36 @@ ], "documentation":"

Describes the connection status of the specified WorkSpaces.

" }, + "DescribeWorkspacesPoolSessions":{ + "name":"DescribeWorkspacesPoolSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspacesPoolSessionsRequest"}, + "output":{"shape":"DescribeWorkspacesPoolSessionsResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list that describes the streaming sessions for a specified pool.

" + }, + "DescribeWorkspacesPools":{ + "name":"DescribeWorkspacesPools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspacesPoolsRequest"}, + "output":{"shape":"DescribeWorkspacesPoolsResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified WorkSpaces Pools.

" + }, "DisassociateConnectionAlias":{ "name":"DisassociateConnectionAlias", "http":{ @@ -788,7 +838,8 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidResourceStateException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"OperationNotSupportedException"} ], "documentation":"

Disassociates the specified IP access control group from the specified directory.

" }, @@ -950,7 +1001,8 @@ "errors":[ {"shape":"InvalidParameterValuesException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"OperationNotSupportedException"} ], "documentation":"

Modifies the properties of the specified Amazon WorkSpaces clients.

" }, @@ -981,10 +1033,27 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotSupportedException"} ], "documentation":"

Modifies the self-service WorkSpace management capabilities for your users. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.

" }, + "ModifyStreamingProperties":{ + "name":"ModifyStreamingProperties", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyStreamingPropertiesRequest"}, + "output":{"shape":"ModifyStreamingPropertiesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotSupportedException"} + ], + "documentation":"

Modifies the specified streaming properties.

" + }, "ModifyWorkspaceAccessProperties":{ "name":"ModifyWorkspaceAccessProperties", "http":{ @@ -1092,7 +1161,8 @@ {"shape":"WorkspacesDefaultRoleNotFoundException"}, {"shape":"InvalidResourceStateException"}, {"shape":"UnsupportedNetworkConfigurationException"}, - {"shape":"OperationNotSupportedException"} + {"shape":"OperationNotSupportedException"}, + {"shape":"ResourceAlreadyExistsException"} ], "documentation":"

Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.

" }, @@ -1155,6 +1225,25 @@ "output":{"shape":"StartWorkspacesResult"}, "documentation":"

Starts the specified WorkSpaces.

You cannot start a WorkSpace unless it has a running mode of AutoStop and a state of STOPPED.

" }, + "StartWorkspacesPool":{ + "name":"StartWorkspacesPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartWorkspacesPoolRequest"}, + "output":{"shape":"StartWorkspacesPoolResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts the specified pool.

You cannot start a pool unless it has a running mode of AutoStop and a state of STOPPED.

" + }, "StopWorkspaces":{ "name":"StopWorkspaces", "http":{ @@ -1165,6 +1254,23 @@ "output":{"shape":"StopWorkspacesResult"}, "documentation":"

Stops the specified WorkSpaces.

You cannot stop a WorkSpace unless it has a running mode of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR.

" }, + "StopWorkspacesPool":{ + "name":"StopWorkspacesPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopWorkspacesPoolRequest"}, + "output":{"shape":"StopWorkspacesPoolResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Stops the specified pool.

You cannot stop a WorkSpace pool unless it has a running mode of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR.

" + }, "TerminateWorkspaces":{ "name":"TerminateWorkspaces", "http":{ @@ -1175,6 +1281,40 @@ "output":{"shape":"TerminateWorkspacesResult"}, "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web Services Support before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" }, + "TerminateWorkspacesPool":{ + "name":"TerminateWorkspacesPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkspacesPoolRequest"}, + "output":{"shape":"TerminateWorkspacesPoolResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Terminates the specified pool.

" + }, + "TerminateWorkspacesPoolSession":{ + "name":"TerminateWorkspacesPoolSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkspacesPoolSessionRequest"}, + "output":{"shape":"TerminateWorkspacesPoolSessionResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Terminates the pool session.

" + }, "UpdateConnectClientAddIn":{ "name":"UpdateConnectClientAddIn", "http":{ @@ -1259,12 +1399,31 @@ {"shape":"OperationNotSupportedException"} ], "documentation":"

Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

After an image has been shared, the recipient account can copy the image to other Regions as needed.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web Services Support.

For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

  • To delete an image that has been shared, you must unshare the image before you delete it.

  • Sharing Bring Your Own License (BYOL) images across Amazon Web Services accounts isn't supported at this time in Amazon Web Services GovCloud (US). To share BYOL images across accounts in Amazon Web Services GovCloud (US), contact Amazon Web Services Support.

" + }, + "UpdateWorkspacesPool":{ + "name":"UpdateWorkspacesPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWorkspacesPoolRequest"}, + "output":{"shape":"UpdateWorkspacesPoolResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates the specified pool.

" } }, "shapes":{ "ARN":{ "type":"string", - "pattern":"^arn:aws:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.-]{0,127}$" + "pattern":"^arn:aws[a-z-]{0,7}:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.\\\\-]{0,1023}$" }, "AcceptAccountLinkInvitationRequest":{ "type":"structure", @@ -1374,6 +1533,32 @@ "type":"list", "member":{"shape":"AccountModification"} }, + "ActiveDirectoryConfig":{ + "type":"structure", + "required":[ + "DomainName", + "ServiceAccountSecretArn" + ], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "ServiceAccountSecretArn":{ + "shape":"SecretsManagerArn", + "documentation":"

Indicates the secret ARN on the service account.

" + } + }, + "documentation":"

Information about the Active Directory config.

" + }, + "ActiveUserSessions":{ + "type":"integer", + "min":0 + }, + "ActualUserSessions":{ + "type":"integer", + "min":0 + }, "AddInName":{ "type":"string", "max":64, @@ -1463,6 +1648,47 @@ "type":"list", "member":{"shape":"ApplicationResourceAssociation"} }, + "ApplicationSettingsRequest":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ApplicationSettingsStatusEnum", + "documentation":"

Enables or disables persistent application settings for users during their pool sessions.

" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"

The path prefix for the S3 bucket where users’ persistent application settings are stored. You can allow the same persistent application settings to be used across multiple pools by specifying the same settings group for each pool.

" + } + }, + "documentation":"

The persistent application settings for WorkSpaces Pools users.

" + }, + "ApplicationSettingsResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ApplicationSettingsStatusEnum", + "documentation":"

Specifies whether persistent application settings are enabled for users during their pool sessions.

" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"

The path prefix for the S3 bucket where users’ persistent application settings are stored.

" + }, + "S3BucketName":{ + "shape":"S3BucketName", + "documentation":"

The S3 bucket where users’ persistent application settings are stored. When persistent application settings are enabled for the first time for an account in an Amazon Web Services Region, an S3 bucket is created. The bucket is unique to the Amazon Web Services account and the Region.

" + } + }, + "documentation":"

Describes the persistent application settings for WorkSpaces Pools users.

" + }, + "ApplicationSettingsStatusEnum":{ + "type":"string", + "enum":[ + "DISABLED", + "ENABLED" + ] + }, "AssociateConnectionAliasRequest":{ "type":"structure", "required":[ @@ -1585,6 +1811,10 @@ "PENDING_DISASSOCIATION" ] }, + "AuthenticationType":{ + "type":"string", + "enum":["SAML"] + }, "AuthorizeIpRulesRequest":{ "type":"structure", "required":[ @@ -1607,6 +1837,10 @@ "members":{ } }, + "AvailableUserSessions":{ + "type":"integer", + "min":0 + }, "AwsAccount":{ "type":"string", "pattern":"^\\d{12}$" @@ -1680,6 +1914,45 @@ "STANDBY" ] }, + "Capacity":{ + "type":"structure", + "required":["DesiredUserSessions"], + "members":{ + "DesiredUserSessions":{ + "shape":"DesiredUserSessions", + "documentation":"

The desired number of user sessions for the WorkSpaces in the pool.

" + } + }, + "documentation":"

Describes the user capacity for a pool of WorkSpaces.

" + }, + "CapacityStatus":{ + "type":"structure", + "required":[ + "AvailableUserSessions", + "DesiredUserSessions", + "ActualUserSessions", + "ActiveUserSessions" + ], + "members":{ + "AvailableUserSessions":{ + "shape":"AvailableUserSessions", + "documentation":"

The number of user sessions currently available for streaming from your pool.

AvailableUserSessions = ActualUserSessions - ActiveUserSessions

" + }, + "DesiredUserSessions":{ + "shape":"DesiredUserSessions", + "documentation":"

The total number of sessions slots that are either running or pending. This represents the total number of concurrent streaming sessions your pool can support in a steady state.

" + }, + "ActualUserSessions":{ + "shape":"ActualUserSessions", + "documentation":"

The total number of user sessions that are available for streaming or are currently streaming in your pool.

ActualUserSessions = AvailableUserSessions + ActiveUserSessions

" + }, + "ActiveUserSessions":{ + "shape":"ActiveUserSessions", + "documentation":"

The number of user sessions currently being used for your pool.

" + } + }, + "documentation":"

Describes the capacity status for a pool of WorkSpaces.

" + }, "CertificateAuthorityArn":{ "type":"string", "max":200, @@ -2312,6 +2585,59 @@ } } }, + "CreateWorkspacesPoolRequest":{ + "type":"structure", + "required":[ + "PoolName", + "Description", + "BundleId", + "DirectoryId", + "Capacity" + ], + "members":{ + "PoolName":{ + "shape":"WorkspacesPoolName", + "documentation":"

The name of the pool.

" + }, + "Description":{ + "shape":"UpdateDescription", + "documentation":"

The pool description.

" + }, + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle for the pool.

" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory for the pool.

" + }, + "Capacity":{ + "shape":"Capacity", + "documentation":"

The user capacity of the pool.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the pool.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettingsRequest", + "documentation":"

Indicates the application settings of the pool.

" + }, + "TimeoutSettings":{ + "shape":"TimeoutSettings", + "documentation":"

Indicates the timeout settings of the pool.

" + } + } + }, + "CreateWorkspacesPoolResult":{ + "type":"structure", + "members":{ + "WorkspacesPool":{ + "shape":"WorkspacesPool", + "documentation":"

Indicates the pool to create.

" + } + } + }, "CreateWorkspacesRequest":{ "type":"structure", "required":["Workspaces"], @@ -2474,6 +2800,10 @@ "EnableMaintenanceMode":{ "shape":"BooleanObject", "documentation":"

Specifies whether maintenance mode is enabled for WorkSpaces. For more information, see WorkSpace Maintenance.

" + }, + "InstanceIamRoleArn":{ + "shape":"ARN", + "documentation":"

Indicates the IAM role ARN of the instance.

" } }, "documentation":"

Describes the default values that are used to create WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.

" @@ -3133,6 +3463,10 @@ "shape":"DirectoryIdList", "documentation":"

The identifiers of the directories. If the value is null, all directories are retrieved.

" }, + "WorkspaceDirectoryNames":{ + "shape":"WorkspaceDirectoryNameList", + "documentation":"

The names of the WorkSpace directories.

" + }, "Limit":{ "shape":"Limit", "documentation":"

The maximum number of directories to return.

" @@ -3274,73 +3608,200 @@ } } }, - "DescribeWorkspacesRequest":{ + "DescribeWorkspacesPoolSessionsRequest":{ "type":"structure", + "required":["PoolId"], "members":{ - "WorkspaceIds":{ - "shape":"WorkspaceIdList", - "documentation":"

The identifiers of the WorkSpaces. You cannot combine this parameter with any other filter.

Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned.

" - }, - "DirectoryId":{ - "shape":"DirectoryId", - "documentation":"

The identifier of the directory. In addition, you can optionally specify a specific directory user (see UserName). You cannot combine this parameter with any other filter.

" + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the pool.

" }, - "UserName":{ - "shape":"UserName", - "documentation":"

The name of the directory user. You must specify this parameter with DirectoryId.

" - }, - "BundleId":{ - "shape":"BundleId", - "documentation":"

The identifier of the bundle. All WorkSpaces that are created from this bundle are retrieved. You cannot combine this parameter with any other filter.

" + "UserId":{ + "shape":"WorkspacesPoolUserId", + "documentation":"

The identifier of the user.

" }, "Limit":{ - "shape":"Limit", + "shape":"Limit50", "documentation":"

The maximum number of items to return.

" }, "NextToken":{ "shape":"PaginationToken", "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" - }, - "WorkspaceName":{ - "shape":"WorkspaceName", - "documentation":"

The name of the user-decoupled WorkSpace.

" } } }, - "DescribeWorkspacesResult":{ + "DescribeWorkspacesPoolSessionsResult":{ "type":"structure", "members":{ - "Workspaces":{ - "shape":"WorkspaceList", - "documentation":"

Information about the WorkSpaces.

Because CreateWorkspaces is an asynchronous operation, some of the returned information could be incomplete.

" + "Sessions":{ + "shape":"WorkspacesPoolSessions", + "documentation":"

Describes the pool sessions.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" } } }, - "Description":{"type":"string"}, - "DirectoryId":{ - "type":"string", - "max":65, - "min":10, - "pattern":"^d-[0-9a-f]{8,63}$" - }, - "DirectoryIdList":{ - "type":"list", - "member":{"shape":"DirectoryId"}, - "max":25, - "min":1 - }, - "DirectoryList":{ - "type":"list", - "member":{"shape":"WorkspaceDirectory"} - }, - "DirectoryName":{"type":"string"}, - "DisassociateConnectionAliasRequest":{ + "DescribeWorkspacesPoolsFilter":{ "type":"structure", - "required":["AliasId"], + "required":[ + "Name", + "Values", + "Operator" + ], + "members":{ + "Name":{ + "shape":"DescribeWorkspacesPoolsFilterName", + "documentation":"

The name of the pool to filter.

" + }, + "Values":{ + "shape":"DescribeWorkspacesPoolsFilterValues", + "documentation":"

The values for filtering WorkSpaces Pools.

" + }, + "Operator":{ + "shape":"DescribeWorkspacesPoolsFilterOperator", + "documentation":"

The operator values for filtering WorkSpaces Pools.

" + } + }, + "documentation":"

Describes the filter conditions for WorkSpaces Pools to return.

" + }, + "DescribeWorkspacesPoolsFilterName":{ + "type":"string", + "enum":["PoolName"] + }, + "DescribeWorkspacesPoolsFilterOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOTEQUALS", + "CONTAINS", + "NOTCONTAINS" + ] + }, + "DescribeWorkspacesPoolsFilterValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9_.-]+$" + }, + "DescribeWorkspacesPoolsFilterValues":{ + "type":"list", + "member":{"shape":"DescribeWorkspacesPoolsFilterValue"}, + "max":25, + "min":1 + }, + "DescribeWorkspacesPoolsFilters":{ + "type":"list", + "member":{"shape":"DescribeWorkspacesPoolsFilter"}, + "max":25, + "min":1 + }, + "DescribeWorkspacesPoolsRequest":{ + "type":"structure", + "members":{ + "PoolIds":{ + "shape":"WorkspacesPoolIds", + "documentation":"

The identifier of the WorkSpaces Pools.

" + }, + "Filters":{ + "shape":"DescribeWorkspacesPoolsFilters", + "documentation":"

The filter conditions for the WorkSpaces Pool to return.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of items to return.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + } + } + }, + "DescribeWorkspacesPoolsResult":{ + "type":"structure", + "members":{ + "WorkspacesPools":{ + "shape":"WorkspacesPools", + "documentation":"

Information about the WorkSpaces Pools.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + } + } + }, + "DescribeWorkspacesRequest":{ + "type":"structure", + "members":{ + "WorkspaceIds":{ + "shape":"WorkspaceIdList", + "documentation":"

The identifiers of the WorkSpaces. You cannot combine this parameter with any other filter.

Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned.

" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory. In addition, you can optionally specify a specific directory user (see UserName). You cannot combine this parameter with any other filter.

" + }, + "UserName":{ + "shape":"UserName", + "documentation":"

The name of the directory user. You must specify this parameter with DirectoryId.

" + }, + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle. All WorkSpaces that are created from this bundle are retrieved. You cannot combine this parameter with any other filter.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of items to return.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

The name of the user-decoupled WorkSpace.

" + } + } + }, + "DescribeWorkspacesResult":{ + "type":"structure", + "members":{ + "Workspaces":{ + "shape":"WorkspaceList", + "documentation":"

Information about the WorkSpaces.

Because CreateWorkspaces is an asynchronous operation, some of the returned information could be incomplete.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "Description":{"type":"string"}, + "DesiredUserSessions":{ + "type":"integer", + "min":0 + }, + "DirectoryId":{ + "type":"string", + "max":65, + "min":10, + "pattern":"^(d-[0-9a-f]{8,63}$)|(wsd-[0-9a-z]{8,63}$)" + }, + "DirectoryIdList":{ + "type":"list", + "member":{"shape":"DirectoryId"}, + "max":25, + "min":1 + }, + "DirectoryList":{ + "type":"list", + "member":{"shape":"WorkspaceDirectory"} + }, + "DirectoryName":{"type":"string"}, + "DisassociateConnectionAliasRequest":{ + "type":"structure", + "required":["AliasId"], "members":{ "AliasId":{ "shape":"ConnectionAliasId", @@ -3401,10 +3862,19 @@ } } }, + "DisconnectTimeoutInSeconds":{ + "type":"integer", + "max":36000, + "min":60 + }, "DnsIpAddresses":{ "type":"list", "member":{"shape":"IpAddress"} }, + "DomainName":{ + "type":"string", + "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+$" + }, "Ec2ImageId":{ "type":"string", "pattern":"^ami\\-([a-f0-9]{8}|[a-f0-9]{17})$" @@ -3427,6 +3897,7 @@ "type":"list", "member":{"shape":"ErrorDetails"} }, + "ErrorMessage":{"type":"string"}, "ErrorType":{"type":"string"}, "ExceptionErrorCode":{"type":"string"}, "ExceptionMessage":{"type":"string"}, @@ -3534,6 +4005,11 @@ } } }, + "IdleDisconnectTimeoutInSeconds":{ + "type":"integer", + "max":36000, + "min":0 + }, "ImageAssociatedResourceType":{ "type":"string", "enum":["APPLICATION"] @@ -3695,7 +4171,7 @@ }, "Applications":{ "shape":"ApplicationList", - "documentation":"

If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses.

  • Although this parameter is an array, only one item is allowed at this time.

  • Windows 11 only supports Microsoft_Office_2019.

" + "documentation":"

If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses.

  • Although this parameter is an array, only one item is allowed at this time.

  • During the image import process, non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not support Office installation.

" } } }, @@ -3865,6 +4341,11 @@ "max":25, "min":1 }, + "Limit50":{ + "type":"integer", + "max":50, + "min":1 + }, "LinkId":{ "type":"string", "pattern":"^link-.{8,24}$" @@ -3955,6 +4436,15 @@ "max":5, "min":1 }, + "MaxUserDurationInSeconds":{ + "type":"integer", + "max":432000, + "min":600 + }, + "MaximumLength":{ + "type":"integer", + "min":0 + }, "MigrateWorkspaceRequest":{ "type":"structure", "required":[ @@ -4126,6 +4616,25 @@ "members":{ } }, + "ModifyStreamingPropertiesRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the resource.

" + }, + "StreamingProperties":{ + "shape":"StreamingProperties", + "documentation":"

The streaming properties to configure.

" + } + } + }, + "ModifyStreamingPropertiesResult":{ + "type":"structure", + "members":{ + } + }, "ModifyWorkspaceAccessPropertiesRequest":{ "type":"structure", "required":[ @@ -4215,6 +4724,20 @@ "members":{ } }, + "NetworkAccessConfiguration":{ + "type":"structure", + "members":{ + "EniPrivateIpAddress":{ + "shape":"NonEmptyString", + "documentation":"

The private IP address of the elastic network interface that is attached to instances in your VPC.

" + }, + "EniId":{ + "shape":"NonEmptyString", + "documentation":"

The resource identifier of the elastic network interface that is attached to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource identifier.

" + } + }, + "documentation":"

Describes the network details of a WorkSpaces Pool.

" + }, "NonEmptyString":{ "type":"string", "min":1 @@ -4242,7 +4765,8 @@ "WINDOWS_7", "WINDOWS_SERVER_2016", "WINDOWS_SERVER_2019", - "WINDOWS_SERVER_2022" + "WINDOWS_SERVER_2022", + "RHEL_8" ] }, "OperatingSystemNameList":{ @@ -4415,10 +4939,6 @@ }, "RegisterWorkspaceDirectoryRequest":{ "type":"structure", - "required":[ - "DirectoryId", - "EnableWorkDocs" - ], "members":{ "DirectoryId":{ "shape":"DirectoryId", @@ -4443,12 +4963,40 @@ "Tags":{ "shape":"TagList", "documentation":"

The tags associated with the directory.

" + }, + "WorkspaceDirectoryName":{ + "shape":"WorkspaceDirectoryName", + "documentation":"

The name of the directory to register.

" + }, + "WorkspaceDirectoryDescription":{ + "shape":"WorkspaceDirectoryDescription", + "documentation":"

Description of the directory to register.

" + }, + "UserIdentityType":{ + "shape":"UserIdentityType", + "documentation":"

The type of identity management the user is using.

" + }, + "WorkspaceType":{ + "shape":"WorkspaceType", + "documentation":"

Indicates whether the directory's WorkSpace type is personal or pools.

" + }, + "ActiveDirectoryConfig":{ + "shape":"ActiveDirectoryConfig", + "documentation":"

The active directory config of the directory.

" } } }, "RegisterWorkspaceDirectoryResult":{ "type":"structure", "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory.

" + }, + "State":{ + "shape":"WorkspaceDirectoryState", + "documentation":"

The registration status of the WorkSpace directory.

" + } } }, "RegistrationCode":{ @@ -4627,6 +5175,7 @@ }, "RootStorage":{ "type":"structure", + "required":["Capacity"], "members":{ "Capacity":{ "shape":"NonEmptyString", @@ -4645,6 +5194,12 @@ ] }, "RunningModeAutoStopTimeoutInMinutes":{"type":"integer"}, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + }, "SamlProperties":{ "type":"structure", "members":{ @@ -4677,6 +5232,10 @@ "min":8, "pattern":"^(http|https)\\://\\S+$" }, + "SecretsManagerArn":{ + "type":"string", + "pattern":"^arn:aws[a-z-]{0,7}:secretsmanager:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:secret:[A-Za-z0-9][A-za-z0-9_/.-]{8,519}$" + }, "SecurityGroupId":{ "type":"string", "max":20, @@ -4709,6 +5268,22 @@ }, "documentation":"

Describes the self-service permissions for a directory. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.

" }, + "SessionConnectionState":{ + "type":"string", + "enum":[ + "CONNECTED", + "NOT_CONNECTED" + ] + }, + "SessionInstanceId":{ + "type":"string", + "pattern":"^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" + }, + "SettingsGroup":{ + "type":"string", + "max":100, + "pattern":"^[A-Za-z0-9_./()!*'-]+$" + }, "Snapshot":{ "type":"structure", "members":{ @@ -4802,6 +5377,21 @@ "max":25, "min":1 }, + "StartWorkspacesPoolRequest":{ + "type":"structure", + "required":["PoolId"], + "members":{ + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the pool.

" + } + } + }, + "StartWorkspacesPoolResult":{ + "type":"structure", + "members":{ + } + }, "StartWorkspacesRequest":{ "type":"structure", "required":["StartWorkspaceRequests"], @@ -4837,6 +5427,21 @@ "max":25, "min":1 }, + "StopWorkspacesPoolRequest":{ + "type":"structure", + "required":["PoolId"], + "members":{ + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the pool.

" + } + } + }, + "StopWorkspacesPoolResult":{ + "type":"structure", + "members":{ + } + }, "StopWorkspacesRequest":{ "type":"structure", "required":["StopWorkspaceRequests"], @@ -4856,6 +5461,65 @@ } } }, + "StorageConnector":{ + "type":"structure", + "required":[ + "ConnectorType", + "Status" + ], + "members":{ + "ConnectorType":{ + "shape":"StorageConnectorTypeEnum", + "documentation":"

The type of connector used to save user files.

" + }, + "Status":{ + "shape":"StorageConnectorStatusEnum", + "documentation":"

Indicates if the storage connector is enabled or disabled.

" + } + }, + "documentation":"

Describes the storage connector.

" + }, + "StorageConnectorStatusEnum":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "StorageConnectorTypeEnum":{ + "type":"string", + "enum":["HOME_FOLDER"] + }, + "StorageConnectors":{ + "type":"list", + "member":{"shape":"StorageConnector"}, + "min":1 + }, + "StreamingExperiencePreferredProtocolEnum":{ + "type":"string", + "enum":[ + "TCP", + "UDP" + ] + }, + "StreamingProperties":{ + "type":"structure", + "members":{ + "StreamingExperiencePreferredProtocol":{ + "shape":"StreamingExperiencePreferredProtocolEnum", + "documentation":"

Indicates the type of preferred protocol for the streaming experience.

" + }, + "UserSettings":{ + "shape":"UserSettings", + "documentation":"

Indicates the permission settings associated with the user.

" + }, + "StorageConnectors":{ + "shape":"StorageConnectors", + "documentation":"

Indicates the storage connector used.

" + } + }, + "documentation":"

Describes the streaming properties.

" + }, "String2048":{ "type":"string", "max":2048, @@ -4935,6 +5599,36 @@ "max":25, "min":1 }, + "TerminateWorkspacesPoolRequest":{ + "type":"structure", + "required":["PoolId"], + "members":{ + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the pool.

" + } + } + }, + "TerminateWorkspacesPoolResult":{ + "type":"structure", + "members":{ + } + }, + "TerminateWorkspacesPoolSessionRequest":{ + "type":"structure", + "required":["SessionId"], + "members":{ + "SessionId":{ + "shape":"AmazonUuid", + "documentation":"

The identifier of the pool session.

" + } + } + }, + "TerminateWorkspacesPoolSessionResult":{ + "type":"structure", + "members":{ + } + }, "TerminateWorkspacesRequest":{ "type":"structure", "required":["TerminateWorkspaceRequests"], @@ -4954,6 +5648,24 @@ } } }, + "TimeoutSettings":{ + "type":"structure", + "members":{ + "DisconnectTimeoutInSeconds":{ + "shape":"DisconnectTimeoutInSeconds", + "documentation":"

Specifies the amount of time, in seconds, that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within the time set, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

" + }, + "IdleDisconnectTimeoutInSeconds":{ + "shape":"IdleDisconnectTimeoutInSeconds", + "documentation":"

The amount of time in seconds a connection will stay active while idle.

" + }, + "MaxUserDurationInSeconds":{ + "shape":"MaxUserDurationInSeconds", + "documentation":"

Specifies the maximum amount of time, in seconds, that a streaming session can remain active. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.

" + } + }, + "documentation":"

Describes the timeout settings for a pool of WorkSpaces.

" + }, "Timestamp":{"type":"timestamp"}, "UnsupportedNetworkConfigurationException":{ "type":"structure", @@ -5110,13 +5822,107 @@ "members":{ } }, + "UpdateWorkspacesPoolRequest":{ + "type":"structure", + "required":["PoolId"], + "members":{ + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the specified pool to update.

" + }, + "Description":{ + "shape":"UpdateDescription", + "documentation":"

Describes the specified pool to update.

" + }, + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle.

" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory.

" + }, + "Capacity":{ + "shape":"Capacity", + "documentation":"

The desired capacity for the pool.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettingsRequest", + "documentation":"

The persistent application settings for users in the pool.

" + }, + "TimeoutSettings":{ + "shape":"TimeoutSettings", + "documentation":"

Indicates the timeout settings of the specified pool.

" + } + } + }, + "UpdateWorkspacesPoolResult":{ + "type":"structure", + "members":{ + "WorkspacesPool":{ + "shape":"WorkspacesPool", + "documentation":"

Describes the specified pool.

" + } + } + }, + "UserIdentityType":{ + "type":"string", + "enum":[ + "CUSTOMER_MANAGED", + "AWS_DIRECTORY_SERVICE" + ] + }, "UserName":{ "type":"string", "max":63, "min":1 }, + "UserSetting":{ + "type":"structure", + "required":[ + "Action", + "Permission" + ], + "members":{ + "Action":{ + "shape":"UserSettingActionEnum", + "documentation":"

Indicates the type of action.

" + }, + "Permission":{ + "shape":"UserSettingPermissionEnum", + "documentation":"

Indicates if the setting is enabled or disabled.

" + }, + "MaximumLength":{ + "shape":"MaximumLength", + "documentation":"

Indicates the maximum character length for the specified user setting.

" + } + }, + "documentation":"

Information about the user's permission settings.

" + }, + "UserSettingActionEnum":{ + "type":"string", + "enum":[ + "CLIPBOARD_COPY_FROM_LOCAL_DEVICE", + "CLIPBOARD_COPY_TO_LOCAL_DEVICE", + "PRINTING_TO_LOCAL_DEVICE", + "SMART_CARD" + ] + }, + "UserSettingPermissionEnum":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "UserSettings":{ + "type":"list", + "member":{"shape":"UserSetting"}, + "min":1 + }, "UserStorage":{ "type":"structure", + "required":["Capacity"], "members":{ "Capacity":{ "shape":"NonEmptyString", @@ -5475,6 +6281,10 @@ "EnableMaintenanceMode":{ "shape":"BooleanObject", "documentation":"

Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see WorkSpace Maintenance.

" + }, + "InstanceIamRoleArn":{ + "shape":"ARN", + "documentation":"

Indicates the IAM role ARN of the instance.

" } }, "documentation":"

Describes the default properties that are used for creating WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.

" @@ -5553,10 +6363,52 @@ "CertificateBasedAuthProperties":{ "shape":"CertificateBasedAuthProperties", "documentation":"

The certificate-based authentication properties used to authenticate SAML 2.0 Identity Provider (IdP) user identities to Active Directory for WorkSpaces login.

" + }, + "WorkspaceDirectoryName":{ + "shape":"WorkspaceDirectoryName", + "documentation":"

The name of the WorkSpace directory.

" + }, + "WorkspaceDirectoryDescription":{ + "shape":"WorkspaceDirectoryDescription", + "documentation":"

The description of the WorkSpace directory.

" + }, + "UserIdentityType":{ + "shape":"UserIdentityType", + "documentation":"

Indicates the identity type of the specified user.

" + }, + "WorkspaceType":{ + "shape":"WorkspaceType", + "documentation":"

Indicates whether the directory's WorkSpace type is personal or pools.

" + }, + "ActiveDirectoryConfig":{ + "shape":"ActiveDirectoryConfig", + "documentation":"

Information about the Active Directory config.

" + }, + "StreamingProperties":{ + "shape":"StreamingProperties", + "documentation":"

The streaming properties to configure.

" + }, + "ErrorMessage":{ + "shape":"Description", + "documentation":"

The error message returned.

" } }, "documentation":"

Describes a directory that is used with Amazon WorkSpaces.

" }, + "WorkspaceDirectoryDescription":{ + "type":"string", + "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]{1,255}$" + }, + "WorkspaceDirectoryName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_.\\s-]{1,64}$" + }, + "WorkspaceDirectoryNameList":{ + "type":"list", + "member":{"shape":"WorkspaceDirectoryName"}, + "max":25, + "min":1 + }, "WorkspaceDirectoryState":{ "type":"string", "enum":[ @@ -5571,7 +6423,8 @@ "type":"string", "enum":[ "SIMPLE_AD", - "AD_CONNECTOR" + "AD_CONNECTOR", + "CUSTOMER_MANAGED" ] }, "WorkspaceErrorCode":{"type":"string"}, @@ -5695,6 +6548,7 @@ "BYOL_GRAPHICSPRO", "BYOL_GRAPHICS_G4DN", "BYOL_REGULAR_WSP", + "BYOL_GRAPHICS_G4DN_WSP", "BYOL_REGULAR_BYOP", "BYOL_GRAPHICS_G4DN_BYOP" ] @@ -5737,7 +6591,7 @@ "members":{ "RunningMode":{ "shape":"RunningMode", - "documentation":"

The running mode. For more information, see Manage the WorkSpace Running Mode.

The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

" + "documentation":"

The running mode. For more information, see Manage the WorkSpace Running Mode.

The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?

" }, "RunningModeAutoStopTimeoutInMinutes":{ "shape":"RunningModeAutoStopTimeoutInMinutes", @@ -5879,6 +6733,13 @@ "ERROR" ] }, + "WorkspaceType":{ + "type":"string", + "enum":[ + "PERSONAL", + "POOLS" + ] + }, "WorkspacesDefaultRoleNotFoundException":{ "type":"structure", "members":{ @@ -5912,6 +6773,219 @@ "WorkspacesIpGroupsList":{ "type":"list", "member":{"shape":"WorkspacesIpGroup"} + }, + "WorkspacesPool":{ + "type":"structure", + "required":[ + "PoolId", + "PoolArn", + "CapacityStatus", + "PoolName", + "State", + "CreatedAt", + "BundleId", + "DirectoryId" + ], + "members":{ + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of a pool.

" + }, + "PoolArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the pool.

" + }, + "CapacityStatus":{ + "shape":"CapacityStatus", + "documentation":"

The capacity status for the pool.

" + }, + "PoolName":{ + "shape":"WorkspacesPoolName", + "documentation":"

The name of the pool.

" + }, + "Description":{ + "shape":"UpdateDescription", + "documentation":"

The description of the pool.

" + }, + "State":{ + "shape":"WorkspacesPoolState", + "documentation":"

The current state of the pool.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time the pool was created.

" + }, + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle used by the pool.

" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory used by the pool.

" + }, + "Errors":{ + "shape":"WorkspacesPoolErrors", + "documentation":"

The pool errors.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettingsResponse", + "documentation":"

The persistent application settings for users of the pool.

" + }, + "TimeoutSettings":{ + "shape":"TimeoutSettings", + "documentation":"

The amount of time that a pool session remains active after users disconnect. If they try to reconnect to the pool session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new pool instance.

" + } + }, + "documentation":"

Describes a pool of WorkSpaces.

" + }, + "WorkspacesPoolError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"WorkspacesPoolErrorCode", + "documentation":"

The error code.

" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

Describes a pool error.

" + }, + "WorkspacesPoolErrorCode":{ + "type":"string", + "enum":[ + "IAM_SERVICE_ROLE_IS_MISSING", + "IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION", + "IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION", + "IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION", + "NETWORK_INTERFACE_LIMIT_EXCEEDED", + "INTERNAL_SERVICE_ERROR", + "MACHINE_ROLE_IS_MISSING", + "STS_DISABLED_IN_REGION", + "SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES", + "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION", + "SUBNET_NOT_FOUND", + "IMAGE_NOT_FOUND", + "INVALID_SUBNET_CONFIGURATION", + "SECURITY_GROUPS_NOT_FOUND", + "IGW_NOT_ATTACHED", + "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION", + "WORKSPACES_POOL_STOPPED", + "WORKSPACES_POOL_INSTANCE_PROVISIONING_FAILURE", + "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND", + "DOMAIN_JOIN_ERROR_ACCESS_DENIED", + "DOMAIN_JOIN_ERROR_LOGON_FAILURE", + "DOMAIN_JOIN_ERROR_INVALID_PARAMETER", + "DOMAIN_JOIN_ERROR_MORE_DATA", + "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN", + "DOMAIN_JOIN_ERROR_NOT_SUPPORTED", + "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME", + "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED", + "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED", + "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED", + "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR", + "DOMAIN_JOIN_ERROR_SECRET_ACTION_PERMISSION_IS_MISSING", + "DOMAIN_JOIN_ERROR_SECRET_DECRYPTION_FAILURE", + "DOMAIN_JOIN_ERROR_SECRET_STATE_INVALID", + "DOMAIN_JOIN_ERROR_SECRET_NOT_FOUND", + "DOMAIN_JOIN_ERROR_SECRET_VALUE_KEY_NOT_FOUND", + "DOMAIN_JOIN_ERROR_SECRET_INVALID", + "BUNDLE_NOT_FOUND", + "DIRECTORY_NOT_FOUND", + "INSUFFICIENT_PERMISSIONS_ERROR", + "DEFAULT_OU_IS_MISSING" + ] + }, + "WorkspacesPoolErrors":{ + "type":"list", + "member":{"shape":"WorkspacesPoolError"} + }, + "WorkspacesPoolId":{ + "type":"string", + "pattern":"^wspool-[0-9a-z]{9}$" + }, + "WorkspacesPoolIds":{ + "type":"list", + "member":{"shape":"WorkspacesPoolId"}, + "max":25, + "min":1 + }, + "WorkspacesPoolName":{ + "type":"string", + "pattern":"^[A-Za-z0-9][A-Za-z0-9_.-]{0,63}$" + 
}, + "WorkspacesPoolSession":{ + "type":"structure", + "required":[ + "SessionId", + "PoolId", + "UserId" + ], + "members":{ + "AuthenticationType":{ + "shape":"AuthenticationType", + "documentation":"

The authentication method. The user is authenticated using a WorkSpaces Pools URL (API) or SAML 2.0 federation (SAML).

" + }, + "ConnectionState":{ + "shape":"SessionConnectionState", + "documentation":"

Specifies whether a user is connected to the pool session.

" + }, + "SessionId":{ + "shape":"AmazonUuid", + "documentation":"

The identifier of the session.

" + }, + "InstanceId":{ + "shape":"SessionInstanceId", + "documentation":"

The identifier for the instance hosting the session.

" + }, + "PoolId":{ + "shape":"WorkspacesPoolId", + "documentation":"

The identifier of the pool.

" + }, + "ExpirationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the pool session ended.

" + }, + "NetworkAccessConfiguration":{ + "shape":"NetworkAccessConfiguration", + "documentation":"

Describes the network details of the pool.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time that the pool session started.

" + }, + "UserId":{ + "shape":"WorkspacesPoolUserId", + "documentation":"

The identifier of the user.

" + } + }, + "documentation":"

Describes a pool session.

" + }, + "WorkspacesPoolSessions":{ + "type":"list", + "member":{"shape":"WorkspacesPoolSession"} + }, + "WorkspacesPoolState":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "RUNNING", + "STARTING", + "STOPPED", + "STOPPING", + "UPDATING" + ] + }, + "WorkspacesPoolUserId":{ + "type":"string", + "max":128, + "min":2 + }, + "WorkspacesPools":{ + "type":"list", + "member":{"shape":"WorkspacesPool"} } }, "documentation":"Amazon WorkSpaces Service

Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft Windows or Amazon Linux desktops for your users, known as WorkSpaces. WorkSpaces eliminates the need to procure and deploy hardware or install complex software. You can quickly add or remove users as your needs change. Users can access their virtual desktops from multiple devices or web browsers.

This API Reference provides detailed information about the actions, data types, parameters, and errors of the WorkSpaces service. For more information about the supported Amazon Web Services Regions, endpoints, and service quotas of the Amazon WorkSpaces service, see WorkSpaces endpoints and quotas in the Amazon Web Services General Reference.

You can also manage your WorkSpaces resources using the WorkSpaces console, Command Line Interface (CLI), and SDKs. For more information about administering WorkSpaces, see the Amazon WorkSpaces Administration Guide. For more information about using the Amazon WorkSpaces client application or web browser to access provisioned WorkSpaces, see the Amazon WorkSpaces User Guide. For more information about using the CLI to manage your WorkSpaces resources, see the WorkSpaces section of the CLI Reference.

" diff --git a/botocore/discovery.py b/botocore/discovery.py index 9c68001dea..95b51b81ba 100644 --- a/botocore/discovery.py +++ b/botocore/discovery.py @@ -185,8 +185,7 @@ def describe_endpoint(self, **kwargs): if not self._always_discover and not discovery_required: # Discovery set to only run on required operations logger.debug( - 'Optional discovery disabled. Skipping discovery for Operation: %s' - % operation + f'Optional discovery disabled. Skipping discovery for Operation: {operation}' ) return None @@ -228,12 +227,12 @@ def __init__(self, manager): def register(self, events, service_id): events.register( - 'before-parameter-build.%s' % service_id, self.gather_identifiers + f'before-parameter-build.{service_id}', self.gather_identifiers ) events.register_first( - 'request-created.%s' % service_id, self.discover_endpoint + f'request-created.{service_id}', self.discover_endpoint ) - events.register('needs-retry.%s' % service_id, self.handle_retries) + events.register(f'needs-retry.{service_id}', self.handle_retries) def gather_identifiers(self, params, model, context, **kwargs): endpoint_discovery = model.endpoint_discovery diff --git a/botocore/docs/bcdoc/docstringparser.py b/botocore/docs/bcdoc/docstringparser.py index 16e74e7d20..ebe16ba5e5 100644 --- a/botocore/docs/bcdoc/docstringparser.py +++ b/botocore/docs/bcdoc/docstringparser.py @@ -83,9 +83,9 @@ def add_tag(self, tag, attrs=None, is_start=True): def _doc_has_handler(self, tag, is_start): if is_start: - handler_name = 'start_%s' % tag + handler_name = f'start_{tag}' else: - handler_name = 'end_%s' % tag + handler_name = f'end_{tag}' return hasattr(self.doc.style, handler_name) @@ -226,12 +226,12 @@ def collapse_whitespace(self): child.collapse_whitespace() def _write_start(self, doc): - handler_name = 'start_%s' % self.tag + handler_name = f'start_{self.tag}' if hasattr(doc.style, handler_name): getattr(doc.style, handler_name)(self.attrs) def _write_end(self, doc, next_child): - handler_name = 
'end_%s' % self.tag + handler_name = f'end_{self.tag}' if hasattr(doc.style, handler_name): if handler_name == 'end_a': # We use lookahead to determine if a space is needed after a link node @@ -248,7 +248,7 @@ class DataNode(Node): def __init__(self, data, parent=None): super().__init__(parent) if not isinstance(data, str): - raise ValueError("Expecting string type, %s given." % type(data)) + raise ValueError(f"Expecting string type, {type(data)} given.") self._leading_whitespace = '' self._trailing_whitespace = '' self._stripped_data = '' diff --git a/botocore/docs/bcdoc/style.py b/botocore/docs/bcdoc/style.py index f2a165a932..205d238d7a 100644 --- a/botocore/docs/bcdoc/style.py +++ b/botocore/docs/bcdoc/style.py @@ -34,7 +34,7 @@ def indentation(self, value): self._indent = value def new_paragraph(self): - return '\n%s' % self.spaces() + return f'\n{self.spaces()}' def indent(self): self._indent += 1 @@ -83,10 +83,10 @@ def __init__(self, doc, indent_width=2): self.list_depth = 0 def new_paragraph(self): - self.doc.write('\n\n%s' % self.spaces()) + self.doc.write(f'\n\n{self.spaces()}') def new_line(self): - self.doc.write('\n%s' % self.spaces()) + self.doc.write(f'\n{self.spaces()}') def _start_inline(self, markup): # Insert space between any directly adjacent bold and italic inlines to @@ -165,11 +165,11 @@ def italics(self, s): def start_p(self, attrs=None): if self.do_p: - self.doc.write('\n\n%s' % self.spaces()) + self.doc.write(f'\n\n{self.spaces()}') def end_p(self): if self.do_p: - self.doc.write('\n\n%s' % self.spaces()) + self.doc.write(f'\n\n{self.spaces()}') def start_code(self, attrs=None): self.doc.do_translation = True @@ -268,14 +268,14 @@ def end_a(self, next_child=None): if ':' in last_write: last_write = last_write.replace(':', r'\:') self.doc.push_write(last_write) - self.doc.push_write(' <%s>`__' % self.a_href) + self.doc.push_write(f' <{self.a_href}>`__') elif last_write == '`': # Look at start_a(). 
It will do a self.doc.write('`') # which is the start of the link title. If that is the # case then there was no link text. We should just # use an inline link. The syntax of this is # ``_ - self.doc.push_write('`<%s>`__' % self.a_href) + self.doc.push_write(f'`<{self.a_href}>`__') else: self.doc.push_write(self.a_href) self.doc.hrefs[self.a_href] = self.a_href @@ -375,9 +375,9 @@ def tocitem(self, item, file_name=None): self.li(item) else: if file_name: - self.doc.writeln(' %s' % file_name) + self.doc.writeln(f' {file_name}') else: - self.doc.writeln(' %s' % item) + self.doc.writeln(f' {item}') def hidden_toctree(self): if self.doc.target == 'html': @@ -394,11 +394,11 @@ def table_of_contents(self, title=None, depth=None): if title is not None: self.doc.writeln(title) if depth is not None: - self.doc.writeln(' :depth: %s' % depth) + self.doc.writeln(f' :depth: {depth}') def start_sphinx_py_class(self, class_name): self.new_paragraph() - self.doc.write('.. py:class:: %s' % class_name) + self.doc.write(f'.. py:class:: {class_name}') self.indent() self.new_paragraph() @@ -408,9 +408,9 @@ def end_sphinx_py_class(self): def start_sphinx_py_method(self, method_name, parameters=None): self.new_paragraph() - content = '.. py:method:: %s' % method_name + content = f'.. py:method:: {method_name}' if parameters is not None: - content += '(%s)' % parameters + content += f'({parameters})' self.doc.write(content) self.indent() self.new_paragraph() @@ -421,7 +421,7 @@ def end_sphinx_py_method(self): def start_sphinx_py_attr(self, attr_name): self.new_paragraph() - self.doc.write('.. py:attribute:: %s' % attr_name) + self.doc.write(f'.. 
py:attribute:: {attr_name}') self.indent() self.new_paragraph() diff --git a/botocore/docs/client.py b/botocore/docs/client.py index bc9b2658c9..41e37426ec 100644 --- a/botocore/docs/client.py +++ b/botocore/docs/client.py @@ -120,9 +120,7 @@ def _add_client_creation_example(self, section): section.style.start_codeblock() section.style.new_line() section.write( - 'client = session.create_client(\'{service}\')'.format( - service=self._service_name - ) + f'client = session.create_client(\'{self._service_name}\')' ) section.style.end_codeblock() @@ -177,14 +175,14 @@ def _add_method_exceptions_list(self, section, operation_model): class_name = ( f'{self._client_class_name}.Client.exceptions.{error.name}' ) - error_section.style.li(':py:class:`%s`' % class_name) + error_section.style.li(f':py:class:`{class_name}`') def _add_model_driven_method(self, section, method_name): service_model = self._client.meta.service_model operation_name = self._client.meta.method_to_api_mapping[method_name] operation_model = service_model.operation_model(operation_name) - example_prefix = 'response = client.%s' % method_name + example_prefix = f'response = client.{method_name}' full_method_name = ( f"{section.context.get('qualifier', '')}{method_name}" ) @@ -344,7 +342,7 @@ def _add_exception_catch_example(self, section, shape): section.write('...') section.style.dedent() section.style.new_line() - section.write('except client.exceptions.%s as e:' % shape.name) + section.write(f'except client.exceptions.{shape.name} as e:') section.style.indent() section.style.new_line() section.write('print(e.response)') diff --git a/botocore/docs/example.py b/botocore/docs/example.py index 9f831bcde1..cb43db5509 100644 --- a/botocore/docs/example.py +++ b/botocore/docs/example.py @@ -68,7 +68,7 @@ def document_shape_type_string( ): if 'enum' in shape.metadata: for i, enum in enumerate(shape.metadata['enum']): - section.write('\'%s\'' % enum) + section.write(f'\'{enum}\'') if i < 
len(shape.metadata['enum']) - 1: section.write('|') else: @@ -107,7 +107,7 @@ def document_shape_type_structure( if exclude and param in exclude: continue param_section = section.add_new_section(param) - param_section.write('\'%s\': ' % param) + param_section.write(f'\'{param}\': ') param_shape = input_members[param] param_value_section = param_section.add_new_section( 'member-value', context={'shape': param_shape.name} diff --git a/botocore/docs/paginator.py b/botocore/docs/paginator.py index 1ac4dd4848..2c9b30034f 100644 --- a/botocore/docs/paginator.py +++ b/botocore/docs/paginator.py @@ -223,9 +223,7 @@ def document_paginate_method( paginate_description = ( 'Creates an iterator that will paginate through responses ' - 'from :py:meth:`{}.Client.{}`.'.format( - get_service_module_name(service_model), xform_name(paginator_name) - ) + f'from :py:meth:`{get_service_module_name(service_model)}.Client.{xform_name(paginator_name)}`.' ) document_model_driven_method( diff --git a/botocore/docs/params.py b/botocore/docs/params.py index cddaf12fc3..74747ec27e 100644 --- a/botocore/docs/params.py +++ b/botocore/docs/params.py @@ -163,7 +163,7 @@ def _add_member_documentation(self, section, shape, name=None, **kwargs): name_section = section.add_new_section('param-name') name_section.write('- ') if name is not None: - name_section.style.bold('%s' % name) + name_section.style.bold(f'{name}') name_section.write(' ') type_section = section.add_new_section('param-type') self._document_non_top_level_param_type(type_section, shape) @@ -186,7 +186,7 @@ def _add_member_documentation(self, section, shape, name=None, **kwargs): ' as follows' ) tagged_union_members_str = ', '.join( - ['``%s``' % key for key in shape.members.keys()] + [f'``{key}``' for key in shape.members.keys()] ) unknown_code_example = ( '\'SDK_UNKNOWN_MEMBER\': ' @@ -255,13 +255,13 @@ def _add_member_documentation( end_type_section = type_section.add_new_section('end-param-type') end_type_section.style.new_line() 
name_section = section.add_new_section('param-name') - name_section.write(':param %s: ' % name) + name_section.write(f':param {name}: ') else: name_section = section.add_new_section('param-name') name_section.write('- ') if name is not None: - name_section.style.bold('%s' % name) + name_section.style.bold(f'{name}') name_section.write(' ') type_section = section.add_new_section('param-type') self._document_non_top_level_param_type(type_section, shape) @@ -286,7 +286,7 @@ def _add_member_documentation( ' following top level keys can be set: %s. ' ) tagged_union_members_str = ', '.join( - ['``%s``' % key for key in shape.members.keys()] + [f'``{key}``' for key in shape.members.keys()] ) tagged_union_docs.write(note % (tagged_union_members_str)) documentation_section.include_doc_string(shape.documentation) diff --git a/botocore/docs/sharedexample.py b/botocore/docs/sharedexample.py index 58cdfa594c..29d3df5fc9 100644 --- a/botocore/docs/sharedexample.py +++ b/botocore/docs/sharedexample.py @@ -104,14 +104,14 @@ def _document_dict( dict_section = section.add_new_section('dict-value') self._start_nested_value(dict_section, '{') for key, val in value.items(): - path.append('.%s' % key) + path.append(f'.{key}') item_section = dict_section.add_new_section(key) item_section.style.new_line() item_comment = self._get_comment(path, comments) if item_comment: item_section.write(item_comment) item_section.style.new_line() - item_section.write("'%s': " % key) + item_section.write(f"'{key}': ") # Shape could be none if there is no output besides ResponseMetadata item_shape = None @@ -131,7 +131,7 @@ def _document_params(self, section, value, comments, path, shape): param_section = section.add_new_section('param-values') self._start_nested_value(param_section, '(') for key, val in value.items(): - path.append('.%s' % key) + path.append(f'.{key}') item_section = param_section.add_new_section(key) item_section.style.new_line() item_comment = self._get_comment(path, comments) @@ 
-156,7 +156,7 @@ def _document_list(self, section, value, comments, path, shape): for index, val in enumerate(value): item_section = list_section.add_new_section(index) item_section.style.new_line() - path.append('[%s]' % index) + path.append(f'[{index}]') item_comment = self._get_comment(path, comments) if item_comment: item_section.write(item_comment) @@ -173,14 +173,14 @@ def _document_str(self, section, value, path): section.write(f"'{safe_value}',") def _document_number(self, section, value, path): - section.write("%s," % str(value)) + section.write(f"{str(value)},") def _document_datetime(self, section, value, path): datetime_tuple = parse_timestamp(value).timetuple() datetime_str = str(datetime_tuple[0]) for i in range(1, len(datetime_tuple)): datetime_str += ", " + str(datetime_tuple[i]) - section.write("datetime(%s)," % datetime_str) + section.write(f"datetime({datetime_str}),") def _get_comment(self, path, comments): key = re.sub(r'^\.', '', ''.join(path)) diff --git a/botocore/docs/utils.py b/botocore/docs/utils.py index eb6cae145c..161e260229 100644 --- a/botocore/docs/utils.py +++ b/botocore/docs/utils.py @@ -214,8 +214,11 @@ def append_documentation(self, event_name, section, **kwargs): } # Combines all CONTROLS keys into a big or regular expression _ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS))) + + # Based on the match get the appropriate replacement from CONTROLS -_CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)] +def _CONTROLS_MATCH_HANDLER(match): + return _CONTROLS[match.group(0)] def escape_controls(value): diff --git a/botocore/docs/waiter.py b/botocore/docs/waiter.py index 1da048a163..2918602d2f 100644 --- a/botocore/docs/waiter.py +++ b/botocore/docs/waiter.py @@ -72,7 +72,7 @@ def _add_single_waiter(self, section, waiter_name): waiter_section.style.start_codeblock() waiter_section.style.new_line() waiter_section.write( - 'waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name) + f'waiter = 
client.get_waiter(\'{xform_name(waiter_name)}\')' ) waiter_section.style.end_codeblock() @@ -135,7 +135,7 @@ def document_wait_method( type_name='integer', documentation=( '

The amount of time in seconds to wait between ' - 'attempts. Default: {}

'.format(waiter_model.delay) + f'attempts. Default: {waiter_model.delay}

' ), ) @@ -144,7 +144,7 @@ def document_wait_method( type_name='integer', documentation=( '

The maximum number of attempts to be made. ' - 'Default: {}

'.format(waiter_model.max_attempts) + f'Default: {waiter_model.max_attempts}

' ), ) @@ -161,14 +161,10 @@ def document_wait_method( ] wait_description = ( - 'Polls :py:meth:`{}.Client.{}` every {} ' + f'Polls :py:meth:`{get_service_module_name(service_model)}.Client.' + f'{xform_name(waiter_model.operation)}` every {waiter_model.delay} ' 'seconds until a successful state is reached. An error is ' - 'raised after {} failed checks.'.format( - get_service_module_name(service_model), - xform_name(waiter_model.operation), - waiter_model.delay, - waiter_model.max_attempts, - ) + f'raised after {waiter_model.max_attempts} failed checks.' ) document_model_driven_method( diff --git a/botocore/endpoint.py b/botocore/endpoint.py index adc622c25a..59f3d86c8e 100644 --- a/botocore/endpoint.py +++ b/botocore/endpoint.py @@ -128,9 +128,7 @@ def create_request(self, params, operation_model=None): ] ) service_id = operation_model.service_model.service_id.hyphenize() - event_name = 'request-created.{service_id}.{op_name}'.format( - service_id=service_id, op_name=operation_model.name - ) + event_name = f'request-created.{service_id}.{operation_model.name}' self._event_emitter.emit( event_name, request=request, @@ -224,9 +222,9 @@ def _send_request(self, request_dict, operation_model): ): # We want to share num retries, not num attempts. 
total_retries = attempts - 1 - success_response[1]['ResponseMetadata'][ - 'RetryAttempts' - ] = total_retries + success_response[1]['ResponseMetadata']['RetryAttempts'] = ( + total_retries + ) if exception is not None: raise exception else: @@ -298,9 +296,9 @@ def _do_get_response(self, request, operation_model, context): ) http_response_record_dict = response_dict.copy() - http_response_record_dict[ - 'streaming' - ] = operation_model.has_streaming_output + http_response_record_dict['streaming'] = ( + operation_model.has_streaming_output + ) history_recorder.record('HTTP_RESPONSE', http_response_record_dict) protocol = operation_model.metadata['protocol'] @@ -399,7 +397,7 @@ def create_endpoint( if not is_valid_endpoint_url( endpoint_url ) and not is_valid_ipv6_endpoint_url(endpoint_url): - raise ValueError("Invalid endpoint: %s" % endpoint_url) + raise ValueError(f"Invalid endpoint: {endpoint_url}") if proxies is None: proxies = self._get_proxies(endpoint_url) diff --git a/botocore/endpoint_provider.py b/botocore/endpoint_provider.py index 1be5a25c8d..9439086c53 100644 --- a/botocore/endpoint_provider.py +++ b/botocore/endpoint_provider.py @@ -20,7 +20,6 @@ or you can look at the test files in /tests/unit/data/endpoints/valid-rules/ """ - import logging import re from enum import Enum diff --git a/botocore/errorfactory.py b/botocore/errorfactory.py index d9a1e9cd9c..6084e51da4 100644 --- a/botocore/errorfactory.py +++ b/botocore/errorfactory.py @@ -49,8 +49,8 @@ def __getattr__(self, name): for exception_cls in self._code_to_exception.values() ] raise AttributeError( - fr"{self} object has no attribute {name}. " - fr"Valid exceptions are: {', '.join(exception_cls_names)}" + rf"{self} object has no attribute {name}. 
" + rf"Valid exceptions are: {', '.join(exception_cls_names)}" ) diff --git a/botocore/eventstream.py b/botocore/eventstream.py index 11baf81a32..b7999a6e50 100644 --- a/botocore/eventstream.py +++ b/botocore/eventstream.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -"""Binary Event Stream Decoding """ +"""Binary Event Stream Decoding""" from binascii import crc32 from struct import unpack @@ -33,7 +33,7 @@ class DuplicateHeader(ParserError): """Duplicate header found in the event.""" def __init__(self, header): - message = 'Duplicate header present: "%s"' % header + message = f'Duplicate header present: "{header}"' super().__init__(message) @@ -41,10 +41,7 @@ class InvalidHeadersLength(ParserError): """Headers length is longer than the maximum.""" def __init__(self, length): - message = 'Header length of {} exceeded the maximum of {}'.format( - length, - _MAX_HEADERS_LENGTH, - ) + message = f'Header length of {length} exceeded the maximum of {_MAX_HEADERS_LENGTH}' super().__init__(message) @@ -52,10 +49,7 @@ class InvalidPayloadLength(ParserError): """Payload length is longer than the maximum.""" def __init__(self, length): - message = 'Payload length of {} exceeded the maximum of {}'.format( - length, - _MAX_PAYLOAD_LENGTH, - ) + message = f'Payload length of {length} exceeded the maximum of {_MAX_PAYLOAD_LENGTH}' super().__init__(message) @@ -63,12 +57,7 @@ class ChecksumMismatch(ParserError): """Calculated checksum did not match the expected checksum.""" def __init__(self, expected, calculated): - message = ( - 'Checksum mismatch: expected 0x{:08x}, calculated 0x{:08x}'.format( - expected, - calculated, - ) - ) + message = f'Checksum mismatch: expected 0x{expected:08x}, calculated 0x{calculated:08x}' super().__init__(message) diff --git a/botocore/exceptions.py 
b/botocore/exceptions.py index 1c480abbf8..9fa0dfaa84 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -514,7 +514,7 @@ class UnknownClientMethodError(BotoCoreError): class UnsupportedSignatureVersionError(BotoCoreError): """Error when trying to use an unsupported Signature Version.""" - fmt = 'Signature version is not supported: {signature_version}' + fmt = 'Signature version(s) are not supported: {signature_version}' class ClientError(Exception): diff --git a/botocore/handlers.py b/botocore/handlers.py index a6e5f7ed8c..99eed3bfc5 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -203,6 +203,11 @@ def set_operation_specific_signer(context, signing_name, **kwargs): if auth_type == 'bearer': return 'bearer' + # If the operation needs an unsigned body, we set additional context + # allowing the signer to be aware of this. + if context.get('unsigned_payload') or auth_type == 'v4-unsigned-body': + context['payload_signing_enabled'] = False + if auth_type.startswith('v4'): if auth_type == 'v4-s3express': return auth_type @@ -210,7 +215,8 @@ def set_operation_specific_signer(context, signing_name, **kwargs): if auth_type == 'v4a': # If sigv4a is chosen, we must add additional signing config for # global signature. - signing = {'region': '*', 'signing_name': signing_name} + region = _resolve_sigv4a_region(context) + signing = {'region': region, 'signing_name': signing_name} if 'signing' in context: context['signing'].update(signing) else: @@ -219,11 +225,6 @@ def set_operation_specific_signer(context, signing_name, **kwargs): else: signature_version = 'v4' - # If the operation needs an unsigned body, we set additional context - # allowing the signer to be aware of this. - if auth_type == 'v4-unsigned-body': - context['payload_signing_enabled'] = False - # Signing names used by s3 and s3-control use customized signers "s3v4" # and "s3v4a". 
if signing_name in S3_SIGNING_NAMES: @@ -232,6 +233,15 @@ def set_operation_specific_signer(context, signing_name, **kwargs): return signature_version +def _resolve_sigv4a_region(context): + region = None + if 'client_config' in context: + region = context['client_config'].sigv4a_signing_region_set + if not region and context.get('signing', {}).get('region'): + region = context['signing']['region'] + return region or '*' + + def decode_console_output(parsed, **kwargs): if 'Output' in parsed: try: @@ -251,8 +261,7 @@ def generate_idempotent_uuid(params, model, **kwargs): if name not in params: params[name] = str(uuid.uuid4()) logger.debug( - "injecting idempotency token (%s) into param '%s'." - % (params[name], name) + f"injecting idempotency token ({params[name]}) into param '{name}'." ) @@ -454,7 +463,7 @@ def _quote_source_header_from_dict(source_dict): ) final = percent_encode(final, safe=SAFE_CHARS + '/') if version_id is not None: - final += '?versionId=%s' % version_id + final += f'?versionId={version_id}' return final @@ -632,8 +641,8 @@ def validate_ascii_metadata(params, **kwargs): except UnicodeEncodeError: error_msg = ( 'Non ascii characters found in S3 metadata ' - 'for key "%s", value: "%s". \nS3 metadata can only ' - 'contain ASCII characters. ' % (key, value) + f'for key "{key}", value: "{value}". \nS3 metadata can only ' + 'contain ASCII characters. ' ) raise ParamValidationError(report=error_msg) @@ -761,10 +770,10 @@ def check_openssl_supports_tls_version_1_2(**kwargs): openssl_version_tuple = ssl.OPENSSL_VERSION_INFO if openssl_version_tuple < (1, 0, 1): warnings.warn( - 'Currently installed openssl version: %s does not ' + f'Currently installed openssl version: {ssl.OPENSSL_VERSION} does not ' 'support TLS 1.2, which is required for use of iot-data. ' 'Please use python installed with openssl version 1.0.1 or ' - 'higher.' 
% (ssl.OPENSSL_VERSION), + 'higher.', UnsupportedTLSVersionWarning, ) # We cannot check the openssl version on python2.6, so we should just diff --git a/botocore/hooks.py b/botocore/hooks.py index 01248a1ea9..583cb39c3b 100644 --- a/botocore/hooks.py +++ b/botocore/hooks.py @@ -170,7 +170,7 @@ def unregister( def _verify_is_callable(self, func): if not callable(func): - raise ValueError("Event handler %s must be callable." % func) + raise ValueError(f"Event handler {func} must be callable.") def _verify_accept_kwargs(self, func): """Verifies a callable accepts kwargs @@ -314,20 +314,20 @@ def _register_section( if unique_id_uses_count: if not count: raise ValueError( - "Initial registration of unique id %s was " + f"Initial registration of unique id {unique_id} was " "specified to use a counter. Subsequent register " "calls to unique id must specify use of a counter " - "as well." % unique_id + "as well." ) else: self._unique_id_handlers[unique_id]['count'] += 1 else: if count: raise ValueError( - "Initial registration of unique id %s was " + f"Initial registration of unique id {unique_id} was " "specified to not use a counter. Subsequent " "register calls to unique id must specify not to " - "use a counter as well." % unique_id + "use a counter as well." ) return else: @@ -364,9 +364,9 @@ def unregister( if unique_id_uses_count: if count is None: raise ValueError( - "Initial registration of unique id %s was specified to " + f"Initial registration of unique id {unique_id} was specified to " "use a counter. Subsequent unregister calls to unique " - "id must specify use of a counter as well." % unique_id + "id must specify use of a counter as well." ) elif count == 1: handler = self._unique_id_handlers.pop(unique_id)[ @@ -378,10 +378,10 @@ def unregister( else: if count: raise ValueError( - "Initial registration of unique id %s was specified " + f"Initial registration of unique id {unique_id} was specified " "to not use a counter. 
Subsequent unregister calls " "to unique id must specify not to use a counter as " - "well." % unique_id + "well." ) handler = self._unique_id_handlers.pop(unique_id)['handler'] try: diff --git a/botocore/httpchecksum.py b/botocore/httpchecksum.py index 3e812c65e7..a97eb430d4 100644 --- a/botocore/httpchecksum.py +++ b/botocore/httpchecksum.py @@ -11,13 +11,14 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -""" The interfaces in this module are not intended for public use. +"""The interfaces in this module are not intended for public use. This module defines interfaces for applying checksums to HTTP requests within the context of botocore. This involves both resolving the checksum to be used based on client configuration and environment, as well as application of the checksum to the request. """ + import base64 import io import logging @@ -264,7 +265,7 @@ def resolve_request_checksum_algorithm( ) ) raise FlexibleChecksumError( - error_msg="Unsupported checksum algorithm: %s" % algorithm_name + error_msg=f"Unsupported checksum algorithm: {algorithm_name}" ) location_type = "header" @@ -278,7 +279,7 @@ def resolve_request_checksum_algorithm( algorithm = { "algorithm": algorithm_name, "in": location_type, - "name": "x-amz-checksum-%s" % algorithm_name, + "name": f"x-amz-checksum-{algorithm_name}", } if algorithm["name"] in request["headers"]: @@ -313,7 +314,7 @@ def apply_request_checksum(request): _apply_request_trailer_checksum(request) else: raise FlexibleChecksumError( - error_msg="Unknown checksum variant: %s" % algorithm["in"] + error_msg="Unknown checksum variant: {}".format(algorithm["in"]) ) @@ -400,7 +401,7 @@ def handle_checksum_body(http_response, response, context, operation_model): return for algorithm in algorithms: - header_name = "x-amz-checksum-%s" % algorithm + header_name = f"x-amz-checksum-{algorithm}" # If the header is not found, check the next 
algorithm if header_name not in headers: continue @@ -434,7 +435,7 @@ def handle_checksum_body(http_response, response, context, operation_model): def _handle_streaming_response(http_response, response, algorithm): checksum_cls = _CHECKSUM_CLS.get(algorithm) - header_name = "x-amz-checksum-%s" % algorithm + header_name = f"x-amz-checksum-{algorithm}" return StreamingChecksumBody( http_response.raw, response["headers"].get("content-length"), @@ -445,18 +446,15 @@ def _handle_streaming_response(http_response, response, algorithm): def _handle_bytes_response(http_response, response, algorithm): body = http_response.content - header_name = "x-amz-checksum-%s" % algorithm + header_name = f"x-amz-checksum-{algorithm}" checksum_cls = _CHECKSUM_CLS.get(algorithm) checksum = checksum_cls() checksum.update(body) expected = response["headers"][header_name] if checksum.digest() != base64.b64decode(expected): error_msg = ( - "Expected checksum %s did not match calculated checksum: %s" - % ( - expected, - checksum.b64digest(), - ) + f"Expected checksum {expected} did not match calculated " + f"checksum: {checksum.b64digest()}" ) raise FlexibleChecksumError(error_msg=error_msg) return body diff --git a/botocore/loaders.py b/botocore/loaders.py index 2baf4196fc..f5072a3e5f 100644 --- a/botocore/loaders.py +++ b/botocore/loaders.py @@ -101,6 +101,7 @@ for the sdk. For instance, additional operation parameters might be added here which don't represent the actual service api. """ + import logging import os diff --git a/botocore/model.py b/botocore/model.py index 8aa3d2dcc6..677266c8d2 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -11,9 +11,11 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
"""Abstractions to interact with service models.""" + from collections import defaultdict from typing import NamedTuple, Union +from botocore.auth import resolve_auth_type from botocore.compat import OrderedDict from botocore.exceptions import ( MissingServiceIdError, @@ -622,10 +624,24 @@ def context_parameters(self): def request_compression(self): return self._operation_model.get('requestcompression') + @CachedProperty + def auth(self): + return self._operation_model.get('auth') + @CachedProperty def auth_type(self): return self._operation_model.get('authtype') + @CachedProperty + def resolved_auth_type(self): + if self.auth: + return resolve_auth_type(self.auth) + return self.auth_type + + @CachedProperty + def unsigned_payload(self): + return self._operation_model.get('unsignedPayload') + @CachedProperty def error_shapes(self): shapes = self._operation_model.get("errors", []) diff --git a/botocore/paginate.py b/botocore/paginate.py index 42e74d0819..228cdd3cd2 100644 --- a/botocore/paginate.py +++ b/botocore/paginate.py @@ -179,7 +179,7 @@ def get_paginator(self, operation_name): single_paginator_config = self._paginator_config[operation_name] except KeyError: raise ValueError( - "Paginator for operation does not exist: %s" % operation_name + f"Paginator for operation does not exist: {operation_name}" ) return single_paginator_config @@ -232,7 +232,7 @@ def resume_token(self): @resume_token.setter def resume_token(self, value): if not isinstance(value, dict): - raise ValueError("Bad starting token: %s" % value) + raise ValueError(f"Bad starting token: {value}") if 'boto_truncate_amount' in value: token_keys = sorted(self._input_token + ['boto_truncate_amount']) @@ -243,7 +243,7 @@ def resume_token(self, value): if token_keys == dict_keys: self._resume_token = self._token_encoder.encode(value) else: - raise ValueError("Bad starting token: %s" % value) + raise ValueError(f"Bad starting token: {value}") @property def non_aggregate_part(self): @@ -546,7 +546,7 @@ 
def _parse_starting_token_deprecated(self): """ log.debug( "Attempting to fall back to old starting token parser. For " - "token: %s" % self._starting_token + f"token: {self._starting_token}" ) if self._starting_token is None: return None @@ -577,7 +577,7 @@ def _convert_deprecated_starting_token(self, deprecated_token): len_deprecated_token = len(deprecated_token) len_input_token = len(self._input_token) if len_deprecated_token > len_input_token: - raise ValueError("Bad starting token: %s" % self._starting_token) + raise ValueError(f"Bad starting token: {self._starting_token}") elif len_deprecated_token < len_input_token: log.debug( "Old format starting token does not contain all input " diff --git a/botocore/parsers.py b/botocore/parsers.py index 3905757c85..0c7a34f218 100644 --- a/botocore/parsers.py +++ b/botocore/parsers.py @@ -114,6 +114,7 @@ } """ + import base64 import http.client import json @@ -315,7 +316,7 @@ def _do_generic_error_parse(self, response): } def _do_parse(self, response, shape): - raise NotImplementedError("%s._do_parse" % self.__class__.__name__) + raise NotImplementedError(f"{self.__class__.__name__}._do_parse") def _do_error_parse(self, response, shape): raise NotImplementedError(f"{self.__class__.__name__}._do_error_parse") @@ -398,7 +399,7 @@ def _handle_map(self, shape, node): elif tag_name == value_location_name: val_name = self._parse_shape(value_shape, single_pair) else: - raise ResponseParserError("Unknown tag: %s" % tag_name) + raise ResponseParserError(f"Unknown tag: {tag_name}") parsed[key_name] = val_name return parsed @@ -506,9 +507,8 @@ def _parse_xml_string_to_dom(self, xml_string): root = parser.close() except XMLParseError as e: raise ResponseParserError( - "Unable to parse response (%s), " - "invalid XML received. Further retries may succeed:\n%s" - % (e, xml_string) + f"Unable to parse response ({e}), " + f"invalid XML received. 
Further retries may succeed:\n{xml_string}" ) return root diff --git a/botocore/regions.py b/botocore/regions.py index 0fe8f0ee0e..cfa3bde115 100644 --- a/botocore/regions.py +++ b/botocore/regions.py @@ -16,6 +16,7 @@ given service and region and resolving the available endpoints for a service in a specific AWS partition. """ + import copy import logging import re @@ -261,7 +262,7 @@ def _endpoint_for_partition( ): error_msg = ( "Dualstack endpoints are currently not supported" - " for %s partition" % partition_name + f" for {partition_name} partition" ) raise EndpointVariantError(tags=['dualstack'], error_msg=error_msg) @@ -357,8 +358,7 @@ def _resolve( if endpoint_data.get('deprecated'): LOG.warning( - 'Client is configured with the deprecated endpoint: %s' - % (endpoint_name) + f'Client is configured with the deprecated endpoint: {endpoint_name}' ) service_defaults = service_data.get('defaults', {}) @@ -496,7 +496,7 @@ def construct_endpoint( operation_model, call_args, request_context ) LOG.debug( - 'Calling endpoint provider with parameters: %s' % provider_params + f'Calling endpoint provider with parameters: {provider_params}' ) try: provider_result = self._provider.resolve_endpoint( @@ -510,7 +510,7 @@ def construct_endpoint( raise else: raise botocore_exception from ex - LOG.debug('Endpoint provider result: %s' % provider_result.url) + LOG.debug(f'Endpoint provider result: {provider_result.url}') # The endpoint provider does not support non-secure transport. if not self._use_ssl and provider_result.url.startswith('https://'): @@ -633,7 +633,7 @@ def _get_customized_builtins( customized_builtins = copy.copy(self._builtins) # Handlers are expected to modify the builtins dict in place. 
self._event_emitter.emit( - 'before-endpoint-resolution.%s' % service_id, + f'before-endpoint-resolution.{service_id}', builtins=customized_builtins, model=operation_model, params=call_args, @@ -722,7 +722,9 @@ def auth_schemes_to_signing_ctx(self, auth_schemes): signing_context['region'] = scheme['signingRegion'] elif 'signingRegionSet' in scheme: if len(scheme['signingRegionSet']) > 0: - signing_context['region'] = scheme['signingRegionSet'][0] + signing_context['region'] = ','.join( + scheme['signingRegionSet'] + ) if 'signingName' in scheme: signing_context.update(signing_name=scheme['signingName']) if 'disableDoubleEncoding' in scheme: diff --git a/botocore/retries/bucket.py b/botocore/retries/bucket.py index 1818e5d57b..09d33c77d0 100644 --- a/botocore/retries/bucket.py +++ b/botocore/retries/bucket.py @@ -1,4 +1,5 @@ """This module implements token buckets used for client side throttling.""" + import threading import time diff --git a/botocore/retries/quota.py b/botocore/retries/quota.py index c3e91ae367..f03942912a 100644 --- a/botocore/retries/quota.py +++ b/botocore/retries/quota.py @@ -1,7 +1,5 @@ -"""Retry quota implementation. +"""Retry quota implementation.""" - -""" import threading diff --git a/botocore/retries/special.py b/botocore/retries/special.py index 9ce18b1fa3..9b782601da 100644 --- a/botocore/retries/special.py +++ b/botocore/retries/special.py @@ -5,6 +5,7 @@ module. Ideally we should be able to remove this module. """ + import logging from binascii import crc32 diff --git a/botocore/retries/standard.py b/botocore/retries/standard.py index 00927d6769..8801530b00 100644 --- a/botocore/retries/standard.py +++ b/botocore/retries/standard.py @@ -23,6 +23,7 @@ based API used by botocore. 
""" + import logging import random @@ -57,9 +58,9 @@ def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS): retry_quota=retry_quota, ) - unique_id = 'retry-config-%s' % service_event_name + unique_id = f'retry-config-{service_event_name}' client.meta.events.register( - 'needs-retry.%s' % service_event_name, + f'needs-retry.{service_event_name}', handler.needs_retry, unique_id=unique_id, ) @@ -263,7 +264,7 @@ def delay_amount(self, context): This class implements truncated binary exponential backoff with jitter:: - t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF) + t_i = rand(0, 1) * min(2 ** attempt, MAX_BACKOFF) where ``i`` is the request attempt (0 based). @@ -271,8 +272,8 @@ def delay_amount(self, context): # The context.attempt_number is a 1-based value, but we have # to calculate the delay based on i based a 0-based value. We # want the first delay to just be ``rand(0, 1)``. - return min( - self._random() * (self._base ** (context.attempt_number - 1)), + return self._random() * min( + (self._base ** (context.attempt_number - 1)), self._max_backoff, ) diff --git a/botocore/retryhandler.py b/botocore/retryhandler.py index deef1bfe9e..c2eed1d9d3 100644 --- a/botocore/retryhandler.py +++ b/botocore/retryhandler.py @@ -288,9 +288,9 @@ def __call__( if attempt_number >= self._max_attempts: # explicitly set MaxAttemptsReached if response is not None and 'ResponseMetadata' in response[1]: - response[1]['ResponseMetadata'][ - 'MaxAttemptsReached' - ] = True + response[1]['ResponseMetadata']['MaxAttemptsReached'] = ( + True + ) logger.debug( "Reached the maximum number of retry attempts: %s", attempt_number, diff --git a/botocore/serialize.py b/botocore/serialize.py index 306441e060..82ed58d09a 100644 --- a/botocore/serialize.py +++ b/botocore/serialize.py @@ -37,6 +37,7 @@ with the exception of blob types. Those are assumed to be binary, and if a str/unicode type is passed in, it will be encoded as utf-8. 
""" + import base64 import calendar import datetime @@ -277,7 +278,7 @@ def _serialize_type_map(self, serialized, value, shape, prefix=''): if self._is_shape_flattened(shape): full_prefix = prefix else: - full_prefix = '%s.entry' % prefix + full_prefix = f'{prefix}.entry' template = full_prefix + '.{i}.{suffix}' key_shape = shape.key value_shape = shape.value @@ -356,7 +357,7 @@ def serialize_to_request(self, parameters, operation_model): ) serialized['headers'] = { 'X-Amz-Target': target, - 'Content-Type': 'application/x-amz-json-%s' % json_version, + 'Content-Type': f'application/x-amz-json-{json_version}', } body = self.MAP_TYPE() input_shape = operation_model.input_shape @@ -373,7 +374,7 @@ def serialize_to_request(self, parameters, operation_model): def _serialize(self, serialized, value, shape, key=None): method = getattr( self, - '_serialize_type_%s' % shape.type_name, + f'_serialize_type_{shape.type_name}', self._default_serialize, ) method(serialized, value, shape, key) @@ -713,7 +714,7 @@ def _serialize_body_params(self, params, shape): def _serialize(self, shape, params, xmlnode, name): method = getattr( self, - '_serialize_type_%s' % shape.type_name, + f'_serialize_type_{shape.type_name}', self._default_serialize, ) method(xmlnode, params, shape, name) @@ -725,7 +726,7 @@ def _serialize_type_structure(self, xmlnode, params, shape, name): namespace_metadata = shape.serialization['xmlNamespace'] attribute_name = 'xmlns' if namespace_metadata.get('prefix'): - attribute_name += ':%s' % namespace_metadata['prefix'] + attribute_name += f":{namespace_metadata['prefix']}" structure_node.attrib[attribute_name] = namespace_metadata['uri'] for key, value in params.items(): member_shape = shape.members[key] diff --git a/botocore/session.py b/botocore/session.py index 0739286ec6..93d020757a 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -557,11 +557,11 @@ def user_agent(self): f'{platform.system()}/{platform.release()}' ) if HAS_CRT: - base += ' 
awscrt/%s' % self._get_crt_version() + base += f' awscrt/{self._get_crt_version()}' if os.environ.get('AWS_EXECUTION_ENV') is not None: - base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV') + base += ' exec-env/{}'.format(os.environ.get('AWS_EXECUTION_ENV')) if self.user_agent_extra: - base += ' %s' % self.user_agent_extra + base += f' {self.user_agent_extra}' return base @@ -615,7 +615,7 @@ def get_service_data(self, service_name, api_version=None): ) service_id = EVENT_ALIASES.get(service_name, service_name) self._events.emit( - 'service-data-loaded.%s' % service_id, + f'service-data-loaded.{service_id}', service_data=service_data, service_name=service_name, session=self, @@ -803,9 +803,9 @@ def get_component(self, name): except ValueError: if name in ['endpoint_resolver', 'exceptions_factory']: warnings.warn( - 'Fetching the %s component with the get_component() ' + f'Fetching the {name} component with the get_component() ' 'method is deprecated as the component has always been ' - 'considered an internal interface of botocore' % name, + 'considered an internal interface of botocore', DeprecationWarning, ) return self._internal_components.get_component(name) @@ -1153,7 +1153,7 @@ def get_component(self, name): try: return self._components[name] except KeyError: - raise ValueError("Unknown component: %s" % name) + raise ValueError(f"Unknown component: {name}") def register_component(self, name, component): self._components[name] = component diff --git a/botocore/signers.py b/botocore/signers.py index ef51805fe5..89319af10b 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -152,9 +152,7 @@ def sign( # Allow mutating request before signing self._event_emitter.emit( - 'before-sign.{}.{}'.format( - self._service_id.hyphenize(), operation_name - ), + f'before-sign.{self._service_id.hyphenize()}.{operation_name}', request=request, signing_name=signing_name, region_name=self._region_name, @@ -231,9 +229,7 @@ def _choose_signer(self, operation_name, 
signing_type, context): signature_version += suffix handler, response = self._event_emitter.emit_until_response( - 'choose-signer.{}.{}'.format( - self._service_id.hyphenize(), operation_name - ), + f'choose-signer.{self._service_id.hyphenize()}.{operation_name}', signing_name=signing_name, region_name=region_name, signature_version=signature_version, @@ -428,9 +424,9 @@ def generate_presigned_url(self, url, date_less_than=None, policy=None): if isinstance(policy, str): policy = policy.encode('utf8') if date_less_than is not None: - params = ['Expires=%s' % int(datetime2timestamp(date_less_than))] + params = [f'Expires={int(datetime2timestamp(date_less_than))}'] else: - params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')] + params = [f"Policy={self._url_b64encode(policy).decode('utf8')}"] signature = self.rsa_signer(policy) params.extend( [ diff --git a/botocore/stub.py b/botocore/stub.py index 137cfe4288..018fc08706 100644 --- a/botocore/stub.py +++ b/botocore/stub.py @@ -239,8 +239,8 @@ def add_response(self, method, service_response, expected_params=None): def _add_response(self, method, service_response, expected_params): if not hasattr(self.client, method): raise ValueError( - "Client %s does not have method: %s" - % (self.client.meta.service_model.service_name, method) + f"Client {self.client.meta.service_model.service_name} " + f"does not have method: {method}" ) # Create a successful http response @@ -383,16 +383,20 @@ def _assert_expected_params(self, model, params, context, **kwargs): if param not in params or expected_params[param] != params[param]: raise StubAssertionError( operation_name=model.name, - reason='Expected parameters:\n%s,\nbut received:\n%s' - % (pformat(expected_params), pformat(params)), + reason=( + f'Expected parameters:\n{pformat(expected_params)},\n' + f'but received:\n{pformat(params)}' + ), ) # Ensure there are no extra params hanging around if sorted(expected_params.keys()) != sorted(params.keys()): raise 
StubAssertionError( operation_name=model.name, - reason='Expected parameters:\n%s,\nbut received:\n%s' - % (pformat(expected_params), pformat(params)), + reason=( + f'Expected parameters:\n{pformat(expected_params)},\n' + f'but received:\n{pformat(params)}' + ), ) def _should_not_stub(self, context): diff --git a/botocore/useragent.py b/botocore/useragent.py index f837fc8699..a9a611910b 100644 --- a/botocore/useragent.py +++ b/botocore/useragent.py @@ -22,6 +22,7 @@ * The ``user_agent_extra`` field in the :py:class:`botocore.config.Config`. """ + import os import platform from copy import copy diff --git a/botocore/utils.py b/botocore/utils.py index e2c5c17a00..0efd7ef325 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -109,8 +109,6 @@ # id, we have to preserve compatibility. This maps the instances where either # is different than the transformed service id. EVENT_ALIASES = { - "a4b": "alexa-for-business", - "alexaforbusiness": "alexa-for-business", "api.mediatailor": "mediatailor", "api.pricing": "pricing", "api.sagemaker": "sagemaker", @@ -433,7 +431,7 @@ def _select_base_url(self, base_url, config): else: chosen_base_url = METADATA_BASE_URL - logger.debug("IMDS ENDPOINT: %s" % chosen_base_url) + logger.debug(f"IMDS ENDPOINT: {chosen_base_url}") if not is_valid_uri(chosen_base_url): raise InvalidIMDSEndpointError(endpoint=chosen_base_url) @@ -998,7 +996,7 @@ def parse_timestamp(value): exc_info=e, ) raise RuntimeError( - 'Unable to calculate correct timezone offset for "%s"' % value + f'Unable to calculate correct timezone offset for "{value}"' ) @@ -1795,17 +1793,16 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): if new_region is None: logger.debug( - "S3 client configured for region %s but the bucket %s is not " - "in that region and the proper region could not be " - "automatically determined." 
% (client_region, bucket) + f"S3 client configured for region {client_region} but the " + f"bucket {bucket} is not in that region and the proper region " + "could not be automatically determined." ) return logger.debug( - "S3 client configured for region %s but the bucket %s is in region" - " %s; Please configure the proper region to avoid multiple " - "unnecessary redirects and signing attempts." - % (client_region, bucket, new_region) + f"S3 client configured for region {client_region} but the bucket {bucket} " + f"is in region {new_region}; Please configure the proper region to " + f"avoid multiple unnecessary redirects and signing attempts." ) # Adding the new region to _cache will make construct_endpoint() to # use the new region as value for the AWS::Region builtin parameter. @@ -1994,17 +1991,16 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): if new_region is None: logger.debug( - "S3 client configured for region %s but the bucket %s is not " + f"S3 client configured for region {client_region} but the bucket {bucket} is not " "in that region and the proper region could not be " - "automatically determined." % (client_region, bucket) + "automatically determined." ) return logger.debug( - "S3 client configured for region %s but the bucket %s is in region" - " %s; Please configure the proper region to avoid multiple " + f"S3 client configured for region {client_region} but the bucket {bucket} is in region" + f" {new_region}; Please configure the proper region to avoid multiple " "unnecessary redirects and signing attempts." 
- % (client_region, bucket, new_region) ) endpoint = self._endpoint_resolver.resolve('s3', new_region) endpoint = endpoint['endpoint_url'] @@ -2089,8 +2085,8 @@ def parse_arn(self, arn): arn_parts = arn.split(':', 5) if len(arn_parts) < 6: raise InvalidArnException( - 'Provided ARN: %s must be of the format: ' - 'arn:partition:service:region:account:resource' % arn + f'Provided ARN: {arn} must be of the format: ' + 'arn:partition:service:region:account:resource' ) return { 'partition': arn_parts[1], @@ -2273,8 +2269,8 @@ def set_endpoint(self, request, **kwargs): raise UnsupportedS3ConfigurationError( msg=( 'Client is configured to use the FIPS psuedo region ' - 'for "%s", but S3 Accelerate does not have any FIPS ' - 'compatible endpoints.' % (self._region) + f'for "{self._region}", but S3 Accelerate does not have any FIPS ' + 'compatible endpoints.' ) ) switch_host_s3_accelerate(request=request, **kwargs) @@ -2294,9 +2290,8 @@ def _validate_fips_supported(self, request): if 'outpost_name' in request.context['s3_accesspoint']: raise UnsupportedS3AccesspointConfigurationError( msg=( - 'Client is configured to use the FIPS psuedo-region "%s", ' + f'Client is configured to use the FIPS psuedo-region "{self._region}", ' 'but outpost ARNs do not support FIPS endpoints.' - % (self._region) ) ) # Transforming psuedo region to actual region @@ -2308,11 +2303,10 @@ def _validate_fips_supported(self, request): raise UnsupportedS3AccesspointConfigurationError( msg=( 'Client is configured to use the FIPS psuedo-region ' - 'for "%s", but the access-point ARN provided is for ' - 'the "%s" region. For clients using a FIPS ' + f'for "{self._region}", but the access-point ARN provided is for ' + f'the "{accesspoint_region}" region. For clients using a FIPS ' 'psuedo-region calls to access-point ARNs in another ' 'region are not allowed.' 
- % (self._region, accesspoint_region) ) ) @@ -2323,8 +2317,8 @@ def _validate_global_regions(self, request): raise UnsupportedS3AccesspointConfigurationError( msg=( 'Client is configured to use the global psuedo-region ' - '"%s". When providing access-point ARNs a regional ' - 'endpoint must be specified.' % self._region + f'"{self._region}". When providing access-point ARNs a regional ' + 'endpoint must be specified.' ) ) @@ -2340,10 +2334,9 @@ def _validate_accesspoint_supported(self, request): if request_partition != self._partition: raise UnsupportedS3AccesspointConfigurationError( msg=( - 'Client is configured for "%s" partition, but access-point' - ' ARN provided is for "%s" partition. The client and ' + f'Client is configured for "{self._partition}" partition, but access-point' + f' ARN provided is for "{request_partition}" partition. The client and ' ' access-point partition must be the same.' - % (self._partition, request_partition) ) ) s3_service = request.context['s3_accesspoint'].get('service') @@ -2488,7 +2481,7 @@ def _get_accesspoint_netloc(self, request_context, region_name): def _inject_fips_if_needed(self, component, request_context): if self._use_fips_endpoint: - return '%s-fips' % component + return f'{component}-fips' return component def _get_accesspoint_path(self, original_path, request_context): @@ -2665,18 +2658,17 @@ def _validate_endpoint_from_arn_details_supported(self, request): if arn_region != self._region: error_msg = ( 'The use_arn_region configuration is disabled but ' - 'received arn for "%s" when the client is configured ' - 'to use "%s"' - ) % (arn_region, self._region) + f'received arn for "{arn_region}" when the client is configured ' + f'to use "{self._region}"' + ) raise UnsupportedS3ControlConfigurationError(msg=error_msg) request_partion = request.context['arn_details']['partition'] if request_partion != self._partition: raise UnsupportedS3ControlConfigurationError( msg=( - 'Client is configured for "%s" partition, but 
arn ' - 'provided is for "%s" partition. The client and ' + f'Client is configured for "{self._partition}" partition, but arn ' + f'provided is for "{request_partion}" partition. The client and ' 'arn partition must be the same.' - % (self._partition, request_partion) ) ) if self._s3_config.get('use_accelerate_endpoint'): @@ -2876,8 +2868,8 @@ def _override_account_id_param(self, params, arn_details): if 'AccountId' in params and params['AccountId'] != account_id: error_msg = ( 'Account ID in arn does not match the AccountId parameter ' - 'provided: "%s"' - ) % params['AccountId'] + 'provided: "{}"' + ).format(params['AccountId']) raise UnsupportedS3ControlArnError( arn=arn_details['original'], msg=error_msg, @@ -3570,7 +3562,6 @@ def is_s3express_bucket(bucket): # values are the transformed service IDs (lower case and hyphenated). CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES = { # Actual service name we use -> Allowed computed service name. - 'alexaforbusiness': 'alexa-for-business', 'apigateway': 'api-gateway', 'application-autoscaling': 'application-auto-scaling', 'appmesh': 'app-mesh', diff --git a/botocore/validate.py b/botocore/validate.py index dfcca3daa8..82aabd66e4 100644 --- a/botocore/validate.py +++ b/botocore/validate.py @@ -210,7 +210,7 @@ def _validate(self, params, shape, errors, name): if special_validator: special_validator(params, shape, errors, name) else: - getattr(self, '_validate_%s' % shape.type_name)( + getattr(self, f'_validate_{shape.type_name}')( params, shape, errors, name ) diff --git a/botocore/waiter.py b/botocore/waiter.py index 2362eebeda..ebac2c9f82 100644 --- a/botocore/waiter.py +++ b/botocore/waiter.py @@ -64,8 +64,7 @@ def wait(self, **kwargs): # Rename the waiter class based on the type of waiter. 
waiter_class_name = str( - '%s.Waiter.%s' - % (get_service_module_name(client.meta.service_model), waiter_name) + f'{get_service_module_name(client.meta.service_model)}.Waiter.{waiter_name}' ) # Create the new waiter class @@ -127,8 +126,8 @@ def _verify_supported_version(self, version): raise WaiterConfigError( error_msg=( "Unsupported waiter version, supported version " - "must be: %s, but version of waiter config " - "is: %s" % (self.SUPPORTED_VERSION, version) + f"must be: {self.SUPPORTED_VERSION}, but version " + f"of waiter config is: {version}" ) ) @@ -136,7 +135,7 @@ def get_waiter(self, waiter_name): try: single_waiter_config = self._waiter_config[waiter_name] except KeyError: - raise ValueError("Waiter does not exist: %s" % waiter_name) + raise ValueError(f"Waiter does not exist: {waiter_name}") return SingleWaiterConfig(single_waiter_config) @@ -178,28 +177,23 @@ def __init__(self, config): @property def explanation(self): if self.matcher == 'path': - return 'For expression "{}" we matched expected path: "{}"'.format( - self.argument, - self.expected, - ) + return f'For expression "{self.argument}" we matched expected path: "{self.expected}"' elif self.matcher == 'pathAll': return ( - 'For expression "%s" all members matched excepted path: "%s"' - % (self.argument, self.expected) + f'For expression "{self.argument}" all members matched ' + f'expected path: "{self.expected}"' ) elif self.matcher == 'pathAny': return ( - 'For expression "%s" we matched expected path: "%s" at least once' - % (self.argument, self.expected) + f'For expression "{self.argument}" we matched expected ' + f'path: "{self.expected}" at least once' ) elif self.matcher == 'status': - return 'Matched expected HTTP status code: %s' % self.expected + return f'Matched expected HTTP status code: {self.expected}' elif self.matcher == 'error': - return 'Matched expected service error code: %s' % self.expected + return f'Matched expected service error code: {self.expected}' else: - return ( - 
'No explanation for unknown waiter type: "%s"' % self.matcher - ) + return f'No explanation for unknown waiter type: "{self.matcher}"' def _create_matcher_func(self): # An acceptor function is a callable that takes a single value. The @@ -222,7 +216,7 @@ def _create_matcher_func(self): return self._create_error_matcher() else: raise WaiterConfigError( - error_msg="Unknown acceptor: %s" % self.matcher + error_msg=f"Unknown acceptor: {self.matcher}" ) def _create_path_matcher(self): @@ -302,7 +296,15 @@ def acceptor_matches(response): # response. So response is still a dictionary, and in the case # of an error response will contain the "Error" and # "ResponseMetadata" key. - return response.get("Error", {}).get("Code", "") == expected + # When expected is True, accept any error code. + # When expected is False, check if any errors were encountered. + # Otherwise, check for a specific AWS error code. + if expected is True: + return "Error" in response and "Code" in response["Error"] + elif expected is False: + return "Error" not in response + else: + return response.get("Error", {}).get("Code", "") == expected return acceptor_matches @@ -356,8 +358,7 @@ def wait(self, **kwargs): # can just handle here by raising an exception. raise WaiterError( name=self.name, - reason='An error occurred (%s): %s' - % ( + reason='An error occurred ({}): {}'.format( response['Error'].get('Code', 'Unknown'), response['Error'].get('Message', 'Unknown'), ), @@ -369,9 +370,7 @@ def wait(self, **kwargs): ) return if current_state == 'failure': - reason = 'Waiter encountered a terminal failure state: %s' % ( - acceptor.explanation - ) + reason = f'Waiter encountered a terminal failure state: {acceptor.explanation}' raise WaiterError( name=self.name, reason=reason, @@ -382,8 +381,8 @@ def wait(self, **kwargs): reason = 'Max attempts exceeded' else: reason = ( - 'Max attempts exceeded. Previously accepted state: %s' - % (acceptor.explanation) + f'Max attempts exceeded. 
Previously accepted state: ' + f'{acceptor.explanation}' ) raise WaiterError( name=self.name, diff --git a/docs/source/conf.py b/docs/source/conf.py index 09569e74fb..90d7befae9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -57,9 +57,9 @@ # built documents. # # The short X.Y version. -version = '1.34.1' +version = '1.35' # The full version, including alpha/beta/rc tags. -release = '1.34.105' +release = '1.35.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pyproject.toml b/pyproject.toml index 5b2146b409..456a848967 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,63 @@ line_length = 79 honor_noqa = true src_paths = ["botocore", "tests"] -[tool.black] +[tool.ruff] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", +] + +# Format same as Black. line-length = 79 -skip_string_normalization = true +indent-width = 4 + +target-version = "py38" + +[tool.ruff.lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. +select = ["E4", "E7", "E9", "F", "UP"] +ignore = [] + +# Allow fix for all enabled rules (when `--fix`) is provided. +fixable = ["ALL"] +unfixable = [] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[tool.ruff.format] +# Like Black, use double quotes for strings, spaces for indents +# and trailing commas. 
+quote-style = "preserve" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" + +docstring-code-format = false +docstring-code-line-length = "dynamic" diff --git a/requirements-dev-lock.txt b/requirements-dev-lock.txt index f8d7fad4b2..1f69686a45 100644 --- a/requirements-dev-lock.txt +++ b/requirements-dev-lock.txt @@ -19,9 +19,9 @@ behave==1.2.5 \ --hash=sha256:89238a5e4b11ff607e8ebc6b4b1fb1a0b1f3d794fba80e1fb4b6b3652979c927 \ --hash=sha256:8c182feece4a519c5ffc11e1ab3682d25d5a390dd5f4573bb1296443beb9d7c7 # via -r requirements-dev.txt -colorama==0.4.5 \ - --hash=sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da \ - --hash=sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4 +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 # via -r requirements-dev.txt coverage[toml]==7.2.7 \ --hash=sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f \ @@ -87,13 +87,13 @@ coverage[toml]==7.2.7 \ # via # -r requirements-dev.txt # pytest-cov -exceptiongroup==1.1.3 \ - --hash=sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9 \ - --hash=sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3 +exceptiongroup==1.2.2 \ + --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ + --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc # via pytest -execnet==1.9.0 \ - --hash=sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5 \ - --hash=sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142 +execnet==2.1.1 \ + --hash=sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc \ + --hash=sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3 # via pytest-xdist importlib-resources==6.4.0 \ 
--hash=sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c \ @@ -101,9 +101,9 @@ importlib-resources==6.4.0 \ # via # jsonschema # jsonschema-specifications -iniconfig==1.1.1 \ - --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ - --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 +iniconfig==2.0.0 \ + --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ + --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 # via pytest jsonschema==4.21.1 \ --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \ @@ -113,31 +113,28 @@ jsonschema-specifications==2023.12.1 \ --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c # via jsonschema -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +packaging==24.1 \ + --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via pytest -parse==1.19.0 \ - --hash=sha256:9ff82852bcb65d139813e2a5197627a94966245c897796760a3a2a8eb66f020b +parse==1.20.2 \ + --hash=sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558 \ + --hash=sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce # via # behave # parse-type -parse-type==0.6.0 \ - --hash=sha256:20b43c660e48ed47f433bce5873a2a3d4b9b6a7ba47bd7f7d2a7cec4bec5551f \ - --hash=sha256:c148e88436bd54dab16484108e882be3367f44952c649c9cd6b82a7370b650cb +parse-type==0.6.2 \ + --hash=sha256:06d39a8b70fde873eb2a131141a0e79bb34a432941fb3d66fad247abafc9766c \ + --hash=sha256:79b1f2497060d0928bc46016793f1fca1057c4aacdf15ef876aa48d75a73a355 # via behave pkgutil-resolve-name==1.3.10 \ 
--hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \ --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e # via jsonschema -pluggy==1.4.0 \ - --hash=sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 \ - --hash=sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be +pluggy==1.5.0 \ + --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ + --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 # via pytest -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging pytest==8.1.1 \ --hash=sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7 \ --hash=sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044 @@ -153,112 +150,116 @@ pytest-xdist==3.5.0 \ --hash=sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a \ --hash=sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24 # via -r requirements-dev.txt -referencing==0.34.0 \ - --hash=sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844 \ - --hash=sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4 +referencing==0.35.1 \ + --hash=sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c \ + --hash=sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de # via # jsonschema # jsonschema-specifications -rpds-py==0.18.0 \ - --hash=sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f \ - --hash=sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c \ - --hash=sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76 \ - --hash=sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e \ - 
--hash=sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157 \ - --hash=sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f \ - --hash=sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5 \ - --hash=sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05 \ - --hash=sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24 \ - --hash=sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1 \ - --hash=sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8 \ - --hash=sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b \ - --hash=sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb \ - --hash=sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07 \ - --hash=sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1 \ - --hash=sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6 \ - --hash=sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e \ - --hash=sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e \ - --hash=sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1 \ - --hash=sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab \ - --hash=sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4 \ - --hash=sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17 \ - --hash=sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594 \ - --hash=sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d \ - --hash=sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d \ - --hash=sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3 \ - --hash=sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c \ - --hash=sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66 \ - 
--hash=sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f \ - --hash=sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80 \ - --hash=sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33 \ - --hash=sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f \ - --hash=sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c \ - --hash=sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022 \ - --hash=sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e \ - --hash=sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f \ - --hash=sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da \ - --hash=sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1 \ - --hash=sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688 \ - --hash=sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795 \ - --hash=sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c \ - --hash=sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98 \ - --hash=sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1 \ - --hash=sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20 \ - --hash=sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307 \ - --hash=sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4 \ - --hash=sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18 \ - --hash=sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294 \ - --hash=sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66 \ - --hash=sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467 \ - --hash=sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948 \ - --hash=sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e \ - 
--hash=sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1 \ - --hash=sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0 \ - --hash=sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7 \ - --hash=sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd \ - --hash=sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641 \ - --hash=sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d \ - --hash=sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9 \ - --hash=sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1 \ - --hash=sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da \ - --hash=sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3 \ - --hash=sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa \ - --hash=sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7 \ - --hash=sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40 \ - --hash=sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496 \ - --hash=sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124 \ - --hash=sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836 \ - --hash=sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434 \ - --hash=sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984 \ - --hash=sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f \ - --hash=sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6 \ - --hash=sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e \ - --hash=sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461 \ - --hash=sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c \ - --hash=sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432 \ - 
--hash=sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73 \ - --hash=sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58 \ - --hash=sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88 \ - --hash=sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337 \ - --hash=sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7 \ - --hash=sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863 \ - --hash=sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475 \ - --hash=sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3 \ - --hash=sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51 \ - --hash=sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf \ - --hash=sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024 \ - --hash=sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40 \ - --hash=sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9 \ - --hash=sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec \ - --hash=sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb \ - --hash=sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7 \ - --hash=sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861 \ - --hash=sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880 \ - --hash=sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f \ - --hash=sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd \ - --hash=sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca \ - --hash=sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58 \ - --hash=sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e +rpds-py==0.19.1 \ + 
--hash=sha256:01227f8b3e6c8961490d869aa65c99653df80d2f0a7fde8c64ebddab2b9b02fd \ + --hash=sha256:08ce9c95a0b093b7aec75676b356a27879901488abc27e9d029273d280438505 \ + --hash=sha256:0b02dd77a2de6e49078c8937aadabe933ceac04b41c5dde5eca13a69f3cf144e \ + --hash=sha256:0d4b52811dcbc1aba08fd88d475f75b4f6db0984ba12275d9bed1a04b2cae9b5 \ + --hash=sha256:13e6d4840897d4e4e6b2aa1443e3a8eca92b0402182aafc5f4ca1f5e24f9270a \ + --hash=sha256:1a129c02b42d46758c87faeea21a9f574e1c858b9f358b6dd0bbd71d17713175 \ + --hash=sha256:1a8dfa125b60ec00c7c9baef945bb04abf8ac772d8ebefd79dae2a5f316d7850 \ + --hash=sha256:1c32e41de995f39b6b315d66c27dea3ef7f7c937c06caab4c6a79a5e09e2c415 \ + --hash=sha256:1d494887d40dc4dd0d5a71e9d07324e5c09c4383d93942d391727e7a40ff810b \ + --hash=sha256:1d4af2eb520d759f48f1073ad3caef997d1bfd910dc34e41261a595d3f038a94 \ + --hash=sha256:1fb93d3486f793d54a094e2bfd9cd97031f63fcb5bc18faeb3dd4b49a1c06523 \ + --hash=sha256:24f8ae92c7fae7c28d0fae9b52829235df83f34847aa8160a47eb229d9666c7b \ + --hash=sha256:24fc5a84777cb61692d17988989690d6f34f7f95968ac81398d67c0d0994a897 \ + --hash=sha256:26ab43b6d65d25b1a333c8d1b1c2f8399385ff683a35ab5e274ba7b8bb7dc61c \ + --hash=sha256:271accf41b02687cef26367c775ab220372ee0f4925591c6796e7c148c50cab5 \ + --hash=sha256:2ddd50f18ebc05ec29a0d9271e9dbe93997536da3546677f8ca00b76d477680c \ + --hash=sha256:31dd5794837f00b46f4096aa8ccaa5972f73a938982e32ed817bb520c465e520 \ + --hash=sha256:31e450840f2f27699d014cfc8865cc747184286b26d945bcea6042bb6aa4d26e \ + --hash=sha256:32e0db3d6e4f45601b58e4ac75c6f24afbf99818c647cc2066f3e4b192dabb1f \ + --hash=sha256:346557f5b1d8fd9966059b7a748fd79ac59f5752cd0e9498d6a40e3ac1c1875f \ + --hash=sha256:34bca66e2e3eabc8a19e9afe0d3e77789733c702c7c43cd008e953d5d1463fde \ + --hash=sha256:3511f6baf8438326e351097cecd137eb45c5f019944fe0fd0ae2fea2fd26be39 \ + --hash=sha256:35af5e4d5448fa179fd7fff0bba0fba51f876cd55212f96c8bbcecc5c684ae5c \ + --hash=sha256:3837c63dd6918a24de6c526277910e3766d8c2b1627c500b155f3eecad8fad65 \ + 
--hash=sha256:39d67896f7235b2c886fb1ee77b1491b77049dcef6fbf0f401e7b4cbed86bbd4 \ + --hash=sha256:3b823be829407393d84ee56dc849dbe3b31b6a326f388e171555b262e8456cc1 \ + --hash=sha256:3c73254c256081704dba0a333457e2fb815364018788f9b501efe7c5e0ada401 \ + --hash=sha256:3ddab996807c6b4227967fe1587febade4e48ac47bb0e2d3e7858bc621b1cace \ + --hash=sha256:3e1dc59a5e7bc7f44bd0c048681f5e05356e479c50be4f2c1a7089103f1621d5 \ + --hash=sha256:4383beb4a29935b8fa28aca8fa84c956bf545cb0c46307b091b8d312a9150e6a \ + --hash=sha256:4cc4bc73e53af8e7a42c8fd7923bbe35babacfa7394ae9240b3430b5dcf16b2a \ + --hash=sha256:4dd02e29c8cbed21a1875330b07246b71121a1c08e29f0ee3db5b4cfe16980c4 \ + --hash=sha256:4f580ae79d0b861dfd912494ab9d477bea535bfb4756a2269130b6607a21802e \ + --hash=sha256:53dbc35808c6faa2ce3e48571f8f74ef70802218554884787b86a30947842a14 \ + --hash=sha256:56313be667a837ff1ea3508cebb1ef6681d418fa2913a0635386cf29cff35165 \ + --hash=sha256:57863d16187995c10fe9cf911b897ed443ac68189179541734502353af33e693 \ + --hash=sha256:5953391af1405f968eb5701ebbb577ebc5ced8d0041406f9052638bafe52209d \ + --hash=sha256:5beffdbe766cfe4fb04f30644d822a1080b5359df7db3a63d30fa928375b2720 \ + --hash=sha256:5e360188b72f8080fefa3adfdcf3618604cc8173651c9754f189fece068d2a45 \ + --hash=sha256:5e58b61dcbb483a442c6239c3836696b79f2cd8e7eec11e12155d3f6f2d886d1 \ + --hash=sha256:69084fd29bfeff14816666c93a466e85414fe6b7d236cfc108a9c11afa6f7301 \ + --hash=sha256:6d1d7539043b2b31307f2c6c72957a97c839a88b2629a348ebabe5aa8b626d6b \ + --hash=sha256:6d8b735c4d162dc7d86a9cf3d717f14b6c73637a1f9cd57fe7e61002d9cb1972 \ + --hash=sha256:6ea961a674172ed2235d990d7edf85d15d8dfa23ab8575e48306371c070cda67 \ + --hash=sha256:71157f9db7f6bc6599a852852f3389343bea34315b4e6f109e5cbc97c1fb2963 \ + --hash=sha256:720f3108fb1bfa32e51db58b832898372eb5891e8472a8093008010911e324c5 \ + --hash=sha256:74129d5ffc4cde992d89d345f7f7d6758320e5d44a369d74d83493429dad2de5 \ + --hash=sha256:747251e428406b05fc86fee3904ee19550c4d2d19258cef274e2151f31ae9d38 \ + 
--hash=sha256:75130df05aae7a7ac171b3b5b24714cffeabd054ad2ebc18870b3aa4526eba23 \ + --hash=sha256:7b3661e6d4ba63a094138032c1356d557de5b3ea6fd3cca62a195f623e381c76 \ + --hash=sha256:7d5c7e32f3ee42f77d8ff1a10384b5cdcc2d37035e2e3320ded909aa192d32c3 \ + --hash=sha256:8124101e92c56827bebef084ff106e8ea11c743256149a95b9fd860d3a4f331f \ + --hash=sha256:81db2e7282cc0487f500d4db203edc57da81acde9e35f061d69ed983228ffe3b \ + --hash=sha256:840e18c38098221ea6201f091fc5d4de6128961d2930fbbc96806fb43f69aec1 \ + --hash=sha256:89cc8921a4a5028d6dd388c399fcd2eef232e7040345af3d5b16c04b91cf3c7e \ + --hash=sha256:8b32cd4ab6db50c875001ba4f5a6b30c0f42151aa1fbf9c2e7e3674893fb1dc4 \ + --hash=sha256:8df1c283e57c9cb4d271fdc1875f4a58a143a2d1698eb0d6b7c0d7d5f49c53a1 \ + --hash=sha256:902cf4739458852fe917104365ec0efbea7d29a15e4276c96a8d33e6ed8ec137 \ + --hash=sha256:97fbb77eaeb97591efdc654b8b5f3ccc066406ccfb3175b41382f221ecc216e8 \ + --hash=sha256:9c7042488165f7251dc7894cd533a875d2875af6d3b0e09eda9c4b334627ad1c \ + --hash=sha256:9e318e6786b1e750a62f90c6f7fa8b542102bdcf97c7c4de2a48b50b61bd36ec \ + --hash=sha256:a9421b23c85f361a133aa7c5e8ec757668f70343f4ed8fdb5a4a14abd5437244 \ + --hash=sha256:aaf71f95b21f9dc708123335df22e5a2fef6307e3e6f9ed773b2e0938cc4d491 \ + --hash=sha256:afedc35fe4b9e30ab240b208bb9dc8938cb4afe9187589e8d8d085e1aacb8309 \ + --hash=sha256:b5e28e56143750808c1c79c70a16519e9bc0a68b623197b96292b21b62d6055c \ + --hash=sha256:b82c9514c6d74b89a370c4060bdb80d2299bc6857e462e4a215b4ef7aa7b090e \ + --hash=sha256:b8f78398e67a7227aefa95f876481485403eb974b29e9dc38b307bb6eb2315ea \ + --hash=sha256:bbda75f245caecff8faa7e32ee94dfaa8312a3367397975527f29654cd17a6ed \ + --hash=sha256:bca34e913d27401bda2a6f390d0614049f5a95b3b11cd8eff80fe4ec340a1208 \ + --hash=sha256:bd04d8cab16cab5b0a9ffc7d10f0779cf1120ab16c3925404428f74a0a43205a \ + --hash=sha256:c149a652aeac4902ecff2dd93c3b2681c608bd5208c793c4a99404b3e1afc87c \ + --hash=sha256:c2087dbb76a87ec2c619253e021e4fb20d1a72580feeaa6892b0b3d955175a71 \ + 
--hash=sha256:c34f751bf67cab69638564eee34023909380ba3e0d8ee7f6fe473079bf93f09b \ + --hash=sha256:c6d20c8896c00775e6f62d8373aba32956aa0b850d02b5ec493f486c88e12859 \ + --hash=sha256:c7af6f7b80f687b33a4cdb0a785a5d4de1fb027a44c9a049d8eb67d5bfe8a687 \ + --hash=sha256:c7b07959866a6afb019abb9564d8a55046feb7a84506c74a6f197cbcdf8a208e \ + --hash=sha256:ca0dda0c5715efe2ab35bb83f813f681ebcd2840d8b1b92bfc6fe3ab382fae4a \ + --hash=sha256:cdb7eb3cf3deb3dd9e7b8749323b5d970052711f9e1e9f36364163627f96da58 \ + --hash=sha256:ce757c7c90d35719b38fa3d4ca55654a76a40716ee299b0865f2de21c146801c \ + --hash=sha256:d1fa67ef839bad3815124f5f57e48cd50ff392f4911a9f3cf449d66fa3df62a5 \ + --hash=sha256:d2dbd8f4990d4788cb122f63bf000357533f34860d269c1a8e90ae362090ff3a \ + --hash=sha256:d4ec0046facab83012d821b33cead742a35b54575c4edfb7ed7445f63441835f \ + --hash=sha256:dbceedcf4a9329cc665452db1aaf0845b85c666e4885b92ee0cddb1dbf7e052a \ + --hash=sha256:dc733d35f861f8d78abfaf54035461e10423422999b360966bf1c443cbc42705 \ + --hash=sha256:dd635c2c4043222d80d80ca1ac4530a633102a9f2ad12252183bcf338c1b9474 \ + --hash=sha256:de1f7cd5b6b351e1afd7568bdab94934d656abe273d66cda0ceea43bbc02a0c2 \ + --hash=sha256:df7c841813f6265e636fe548a49664c77af31ddfa0085515326342a751a6ba51 \ + --hash=sha256:e0f9d268b19e8f61bf42a1da48276bcd05f7ab5560311f541d22557f8227b866 \ + --hash=sha256:e2d66eb41ffca6cc3c91d8387509d27ba73ad28371ef90255c50cb51f8953301 \ + --hash=sha256:e429fc517a1c5e2a70d576077231538a98d59a45dfc552d1ac45a132844e6dfb \ + --hash=sha256:e4d2b88efe65544a7d5121b0c3b003ebba92bfede2ea3577ce548b69c5235185 \ + --hash=sha256:e76c902d229a3aa9d5ceb813e1cbcc69bf5bda44c80d574ff1ac1fa3136dea71 \ + --hash=sha256:ef07a0a1d254eeb16455d839cef6e8c2ed127f47f014bbda64a58b5482b6c836 \ + --hash=sha256:f09529d2332264a902688031a83c19de8fda5eb5881e44233286b9c9ec91856d \ + --hash=sha256:f0a6d4a93d2a05daec7cb885157c97bbb0be4da739d6f9dfb02e101eb40921cd \ + --hash=sha256:f0cf2a0dbb5987da4bd92a7ca727eadb225581dd9681365beba9accbe5308f7d \ + 
--hash=sha256:f2671cb47e50a97f419a02cd1e0c339b31de017b033186358db92f4d8e2e17d8 \ + --hash=sha256:f35b34a5184d5e0cc360b61664c1c06e866aab077b5a7c538a3e20c8fcdbf90b \ + --hash=sha256:f3d73022990ab0c8b172cce57c69fd9a89c24fd473a5e79cbce92df87e3d9c48 \ + --hash=sha256:f5b8353ea1a4d7dfb59a7f45c04df66ecfd363bb5b35f33b11ea579111d4655f \ + --hash=sha256:f809a17cc78bd331e137caa25262b507225854073fd319e987bd216bed911b7c \ + --hash=sha256:f9bc4161bd3b970cd6a6fcda70583ad4afd10f2750609fb1f3ca9505050d4ef3 \ + --hash=sha256:fdf4890cda3b59170009d012fca3294c00140e7f2abe1910e6a730809d0f3f9b # via # jsonschema # referencing @@ -278,7 +279,7 @@ wheel==0.43.0 \ --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 # via -r requirements-dev.txt -zipp==3.18.1 \ - --hash=sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b \ - --hash=sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715 +zipp==3.19.2 \ + --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \ + --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c # via importlib-resources diff --git a/requirements-dev.txt b/requirements-dev.txt index 5003c19203..e0732858cf 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -2,7 +2,8 @@ wheel==0.43.0 behave==1.2.5 jsonschema==4.21.1 coverage==7.2.7 -setuptools==67.8.0;python_version>="3.12" +setuptools==71.1.0;python_version>="3.12" +packaging==24.1;python_version>="3.12" # Requirement for setuptools>=71 # Pytest specific deps pytest==8.1.1 diff --git a/requirements-docs-lock.txt b/requirements-docs-lock.txt new file mode 100644 index 0000000000..26ae627b4d --- /dev/null +++ b/requirements-docs-lock.txt @@ -0,0 +1,281 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --generate-hashes 
--output-file=requirements-docs-lock.txt requirements-docs.txt +# +alabaster==0.7.13 \ + --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ + --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 + # via sphinx +babel==2.15.0 \ + --hash=sha256:08706bdad8d0a3413266ab61bd6c34d0c28d6e1e7badf40a2cebe67644e2e1fb \ + --hash=sha256:8daf0e265d05768bc6c7a314cf1321e9a123afc328cc635c18622a2f30a04413 + # via sphinx +beautifulsoup4==4.12.3 \ + --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ + --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed + # via furo +certifi==2024.7.4 \ + --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ + --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 + # via requests +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + 
--hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + 
--hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + 
--hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + 
--hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 + # via requests +docutils==0.19 \ + --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ + --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc + # via sphinx +furo==2022.12.7 \ + --hash=sha256:7cb76c12a25ef65db85ab0743df907573d03027a33631f17d267e598ebb191f7 \ + --hash=sha256:d8008f8efbe7587a97ba533c8b2df1f9c21ee9b3e5cad0d27f61193d38b1a986 + # via -r requirements-docs.txt +idna==3.7 \ + --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ + --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 + # via requests +imagesize==1.4.1 \ + --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ + --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a + # via sphinx +importlib-metadata==8.0.0 \ + --hash=sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f \ + --hash=sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812 + # via sphinx +jinja2==3.1.4 \ + --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ + --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d + # via sphinx +markupsafe==2.1.5 \ + --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ + --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ + --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ + --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ + 
--hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ + --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ + --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ + --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ + --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ + --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ + --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ + --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ + --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ + --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ + --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ + --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ + --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ + --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ + --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ + --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ + --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ + --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ + --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ + --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ + --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ + --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ + --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ + --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ + 
--hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ + --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ + --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ + --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ + --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ + --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ + --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ + --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ + --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ + --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ + --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ + --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ + --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ + --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ + --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ + --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ + --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ + --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ + --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ + --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ + --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ + --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ + --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ + --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ + 
--hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ + --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ + --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ + --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ + --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ + --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ + --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ + --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 + # via jinja2 +packaging==24.1 \ + --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 + # via sphinx +pygments==2.18.0 \ + --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \ + --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a + # via + # furo + # sphinx +pytz==2024.1 \ + --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \ + --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319 + # via babel +requests==2.32.3 \ + --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ + --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 + # via sphinx +snowballstemmer==2.2.0 \ + --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ + --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a + # via sphinx +soupsieve==2.5 \ + --hash=sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690 \ + --hash=sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7 + # via beautifulsoup4 +sphinx==5.3.0 \ + --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \ + 
--hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5 + # via + # -r requirements-docs.txt + # furo + # sphinx-basic-ng + # sphinx-copybutton + # sphinx-remove-toctrees +sphinx-basic-ng==1.0.0b2 \ + --hash=sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9 \ + --hash=sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b + # via furo +sphinx-copybutton==0.5.1 \ + --hash=sha256:0842851b5955087a7ec7fc870b622cb168618ad408dee42692e9a5c97d071da8 \ + --hash=sha256:366251e28a6f6041514bfb5439425210418d6c750e98d3a695b73e56866a677a + # via -r requirements-docs.txt +sphinx-remove-toctrees==0.0.3 \ + --hash=sha256:1077ebc00652f8a896ce27404d31cb5bdde9eeaefc80ada72d95a7a0a7b99a9d \ + --hash=sha256:e4792cc4e5d25ceb1a44dd1490c45d578e6b36f1b1e385ede659e4c324b98cba + # via -r requirements-docs.txt +sphinxcontrib-applehelp==1.0.4 \ + --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \ + --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e + # via sphinx +sphinxcontrib-devhelp==1.0.2 \ + --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ + --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 + # via sphinx +sphinxcontrib-htmlhelp==2.0.1 \ + --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \ + --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903 + # via sphinx +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via sphinx +sphinxcontrib-qthelp==1.0.3 \ + --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ + --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 + # via sphinx +sphinxcontrib-serializinghtml==1.1.5 \ + 
--hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ + --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 + # via sphinx +urllib3==1.26.19 ; python_version < "3.10" \ + --hash=sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3 \ + --hash=sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429 + # via + # -r requirements-docs.txt + # requests +zipp==3.19.2 \ + --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \ + --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c + # via importlib-metadata diff --git a/requirements-docs.txt b/requirements-docs.txt index 29bf8a854a..de99cd6ebd 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -2,3 +2,5 @@ sphinx==5.3.0 furo==2022.12.7 sphinx_copybutton==0.5.1 sphinx-remove-toctrees==0.0.3 +# Avoid urllib3 2.x below Python 3.10 +urllib3<2.0 ; python_version < "3.10" diff --git a/scripts/ci/install b/scripts/ci/install index ed813995e5..0f29217840 100755 --- a/scripts/ci/install +++ b/scripts/ci/install @@ -43,4 +43,4 @@ if __name__ == "__main__": package = os.path.join('dist', wheel_dist) if args.extras: package = f"\"{package}[{args.extras}]\"" - run('pip install %s' % package) + run(f'pip install {package}') diff --git a/scripts/ci/install-dev-deps b/scripts/ci/install-dev-deps index 061a92645a..71e9badc05 100755 --- a/scripts/ci/install-dev-deps +++ b/scripts/ci/install-dev-deps @@ -27,6 +27,11 @@ def run(command): if __name__ == "__main__": with cd(REPO_ROOT): if sys.version_info[:2] >= (3, 12): - run("pip install setuptools") + # Python 3.12+ no longer includes setuptools by default. + + # Setuptools 71+ now prefers already installed versions + # of packaging _and_ broke the API for packaging<22.0. + # We'll pin to match what's in requirements-dev.txt. 
+ run("pip install setuptools==71.1.0 packaging==24.1") run("pip install -r requirements-dev-lock.txt") diff --git a/scripts/get-model-filename b/scripts/get-model-filename index fa81a5315e..a490e366e8 100755 --- a/scripts/get-model-filename +++ b/scripts/get-model-filename @@ -21,6 +21,7 @@ will be created if it does not exist. Copied: /tmp/myfile.json -> /Users/foo/botocore/data/aws/cloudwatch/2010-08-01.normal.json """ + import json # Note we're using optparse for 2.6 compat. @@ -127,12 +128,14 @@ class TestDeterminePath(unittest.TestCase): ) # The special casing of elasticloadbalancing -> elb. - self.given_metadata( - { - 'apiVersion': '2015-01-01', - 'endpointPrefix': 'elasticloadbalancing', - } - ), + ( + self.given_metadata( + { + 'apiVersion': '2015-01-01', + 'endpointPrefix': 'elasticloadbalancing', + } + ), + ) self.assert_filename_is('botocore/data/aws/elb/2015-01-01.normal.json') diff --git a/scripts/new-change b/scripts/new-change index 716ad9c201..b902c63364 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -36,6 +36,7 @@ You can then use the ``scripts/gen-changelog`` to generate the CHANGELOG.rst file. 
""" + import argparse import json import os @@ -132,9 +133,7 @@ def replace_issue_references(parsed, repo_name): def linkify(match): number = match.group()[1:] - return '`{} `__'.format( - match.group(), repo_name, number - ) + return f'`{match.group()} `__' new_description = re.sub(r'#\d+', linkify, description) parsed['description'] = new_description diff --git a/setup.cfg b/setup.cfg index 7b12eb1356..8d5462dd9e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,7 +9,4 @@ requires_dist = urllib3>=1.25.4,!=2.2.0,<3; python_version>="3.10" [options.extras_require] -crt = awscrt==0.20.9 - -[flake8] -ignore = E203,E226,E501,E731,W503,W504 +crt = awscrt==0.21.2 diff --git a/setup.py b/setup.py index c6a9aea41c..b551cea1ce 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ def find_version(*file_paths): ] extras_require = { - 'crt': ['awscrt==0.20.9'], + 'crt': ['awscrt==0.21.2'], } setup( diff --git a/tests/__init__.py b/tests/__init__.py index 33307c0081..0a647aef94 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -121,9 +121,7 @@ def temporary_file(mode): """ temporary_directory = tempfile.mkdtemp() - basename = 'tmpfile-{}-{}'.format( - int(time.time()), random.randint(1, 1000) - ) + basename = f'tmpfile-{int(time.time())}-{random.randint(1, 1000)}' full_filename = os.path.join(temporary_directory, basename) open(full_filename, 'w').close() try: @@ -523,8 +521,9 @@ def wait(self, check, *args, **kwargs): raise ConsistencyWaiterException(fail_msg) def _fail_message(self, attempts, successes): - format_args = (attempts, successes) - return 'Failed after %s attempts, only had %s successes' % format_args + return ( + f'Failed after {attempts} attempts, only had {successes} successes' + ) class StubbedSession(botocore.session.Session): diff --git a/tests/acceptance/features/environment.py b/tests/acceptance/features/environment.py index 302e51abcb..140cbcb7c9 100644 --- a/tests/acceptance/features/environment.py +++ b/tests/acceptance/features/environment.py @@ 
-39,9 +39,7 @@ def before_feature(context, feature): service_name = tag break else: - raise RuntimeError( - "Unable to create a client for " "feature: %s" % feature - ) + raise RuntimeError(f"Unable to create a client for feature: {feature}") if service_name in SKIP_SERVICES: feature.mark_skipped() diff --git a/tests/acceptance/features/steps/base.py b/tests/acceptance/features/steps/base.py index 7ab58ae672..41d15099c2 100644 --- a/tests/acceptance/features/steps/base.py +++ b/tests/acceptance/features/steps/base.py @@ -84,7 +84,7 @@ def then_expected_type_is_list(context, expression): # the response is a dict to ensure it made it through # our response parser properly. if not isinstance(context.response, dict): - raise AssertionError("Response is not a dict: %s" % context.response) + raise AssertionError(f"Response is not a dict: {context.response}") @then('the response should contain a "{}"') @@ -93,13 +93,12 @@ def then_should_contain_key(context, key): # We really just care that the request succeeded for these # smoke tests. 
if not isinstance(context.response, dict): - raise AssertionError("Response is not a dict: %s" % context.response) + raise AssertionError(f"Response is not a dict: {context.response}") @then('I expect the response error to contain a message') def then_error_has_message(context): if 'Message' not in context.error_response.response['Error']: raise AssertionError( - "Message key missing from error response: %s" - % context.error_response.response + f"Message key missing from error response: {context.error_response.response}" ) diff --git a/tests/functional/docs/__init__.py b/tests/functional/docs/__init__.py index 87e0a76691..2fca0fd5c2 100644 --- a/tests/functional/docs/__init__.py +++ b/tests/functional/docs/__init__.py @@ -89,7 +89,7 @@ def get_method_document_block(self, operation_name, contents): def get_parameter_document_block(self, param_name, contents): contents = contents.decode('utf-8') - start_param_document = ' :type %s:' % param_name + start_param_document = f' :type {param_name}:' start_index = contents.find(start_param_document) self.assertNotEqual(start_index, -1, 'Param is not found in contents') contents = contents[start_index:] @@ -126,7 +126,7 @@ def assert_is_documented_as_autopopulated_param( # Ensure it is not in the example. self.assert_not_contains_line( - '%s=\'string\'' % param_name, method_contents + f'{param_name}=\'string\'', method_contents ) # Ensure it is in the params. 
diff --git a/tests/functional/docs/test_lex.py b/tests/functional/docs/test_lex.py index d7a792021b..4059461a1f 100644 --- a/tests/functional/docs/test_lex.py +++ b/tests/functional/docs/test_lex.py @@ -21,11 +21,11 @@ def test_jsonheader_docs(self): self.assert_contains_lines_in_order( [ '**Request Syntax**', - 'sessionAttributes=%s,' % self.TYPE_STRING, + f'sessionAttributes={self.TYPE_STRING},', ':type sessionAttributes: JSON serializable', '**Response Syntax**', - '\'slots\': %s,' % self.TYPE_STRING, - '\'sessionAttributes\': %s' % self.TYPE_STRING, + f'\'slots\': {self.TYPE_STRING},', + f'\'sessionAttributes\': {self.TYPE_STRING}', '**slots** (JSON serializable)', '**sessionAttributes** (JSON serializable)', ], diff --git a/tests/functional/docs/test_s3.py b/tests/functional/docs/test_s3.py index fc9f125b6f..513d0793c1 100644 --- a/tests/functional/docs/test_s3.py +++ b/tests/functional/docs/test_s3.py @@ -69,7 +69,7 @@ def test_copy_source_documented_as_union_type(self): "{'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}" ) self.assert_contains_line( - "CopySource='string' or %s" % dict_form, content + f"CopySource='string' or {dict_form}", content ) def test_copy_source_param_docs_also_modified(self): diff --git a/tests/functional/docs/test_shared_example_config.py b/tests/functional/docs/test_shared_example_config.py index 3211e562e6..57d9d50bf4 100644 --- a/tests/functional/docs/test_shared_example_config.py +++ b/tests/functional/docs/test_shared_example_config.py @@ -53,9 +53,8 @@ def assert_valid_values(service_name, operation_model, example_config): if input_shape is None and example_input: raise AssertionError( - "Input found in example for %s from %s with id %s, but no input " - "shape is defined." - % (operation_model.name, service_name, example_id) + f"Input found in example for {operation_model.name} from {service_name} " + f"with id {example_id}, but no input shape is defined." 
) example_output = example_config.get('output') @@ -63,9 +62,8 @@ def assert_valid_values(service_name, operation_model, example_config): if output_shape is None and example_output: raise AssertionError( - "Output found in example for %s from %s with id %s, but no output " - "shape is defined." - % (operation_model.name, service_name, example_id) + f"Output found in example for {operation_model.name} from {service_name} " + f"with id {example_id}, but no output shape is defined." ) try: @@ -80,9 +78,8 @@ def assert_valid_values(service_name, operation_model, example_config): ) except AssertionError as e: raise AssertionError( - "Invalid value in example for {} from {} with id {}: {}".format( - operation_model.name, service_name, example_id, e - ) + f"Invalid value in example for {operation_model.name} from " + f"{service_name} with id {example_id}: {e}" ) @@ -104,9 +101,7 @@ def _assert_valid_structure_values(shape, example_dict, path): if invalid_members: dotted_path = '.'.join(path) raise AssertionError( - "Invalid members found for {}: {}".format( - dotted_path, invalid_members - ) + f"Invalid members found for {dotted_path}: {invalid_members}" ) for member_name, example_value in example_dict.items(): @@ -133,9 +128,7 @@ def _assert_valid_timestamp(timestamp, path): except Exception as e: dotted_path = '.'.join(path) raise AssertionError( - 'Failed to parse timestamp {} for {}: {}'.format( - timestamp, dotted_path, e - ) + f'Failed to parse timestamp {timestamp} for {dotted_path}: {e}' ) @@ -144,7 +137,6 @@ def assert_operation_exists(service_model, operation_name): service_model.operation_model(operation_name) except OperationNotFoundError: raise AssertionError( - "Examples found in {} for operation {} that does not exist.".format( - service_model.service_name, operation_name - ) + f"Examples found in {service_model.service_name} for operation " + f"{operation_name} that does not exist." 
) diff --git a/tests/functional/endpoint-rules/application-signals/endpoint-tests-1.json b/tests/functional/endpoint-rules/application-signals/endpoint-tests-1.json new file mode 100644 index 0000000000..5c4d5d9e24 --- /dev/null +++ b/tests/functional/endpoint-rules/application-signals/endpoint-tests-1.json @@ -0,0 +1,201 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + 
"expect": { + "endpoint": { + "url": "https://application-signals-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + 
"params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/alexaforbusiness/endpoint-tests-1.json b/tests/functional/endpoint-rules/apptest/endpoint-tests-1.json similarity index 88% rename from tests/functional/endpoint-rules/alexaforbusiness/endpoint-tests-1.json rename to tests/functional/endpoint-rules/apptest/endpoint-tests-1.json index de7a21af1d..9781f0e5a2 100644 --- a/tests/functional/endpoint-rules/alexaforbusiness/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/apptest/endpoint-tests-1.json @@ -1,62 +1,62 @@ { "testCases": [ { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b.us-east-1.amazonaws.com" + "url": "https://apptest-fips.us-east-1.api.aws" } }, "params": { "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-east-1.api.aws" 
+ "url": "https://apptest-fips.us-east-1.amazonaws.com" } }, "params": { "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-east-1.amazonaws.com" + "url": "https://apptest.us-east-1.api.aws" } }, "params": { "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b.us-east-1.api.aws" + "url": "https://apptest.us-east-1.amazonaws.com" } }, "params": { "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://apptest-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -69,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b-fips.cn-north-1.amazonaws.com.cn" + "url": "https://apptest-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -82,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://apptest.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -95,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b.cn-north-1.amazonaws.com.cn" + "url": 
"https://apptest.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -108,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-gov-east-1.api.aws" + "url": "https://apptest-fips.us-gov-east-1.api.aws" } }, "params": { @@ -121,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-gov-east-1.amazonaws.com" + "url": "https://apptest-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -134,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b.us-gov-east-1.api.aws" + "url": "https://apptest.us-gov-east-1.api.aws" } }, "params": { @@ -147,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b.us-gov-east-1.amazonaws.com" + "url": "https://apptest.us-gov-east-1.amazonaws.com" } }, "params": { @@ -171,7 +171,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://apptest-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -195,7 +195,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b.us-iso-east-1.c2s.ic.gov" + "url": "https://apptest.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -219,7 +219,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://a4b-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://apptest-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -243,7 +243,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://a4b.us-isob-east-1.sc2s.sgov.gov" + "url": "https://apptest.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff --git a/tests/functional/endpoint-rules/elb/endpoint-tests-1.json b/tests/functional/endpoint-rules/elb/endpoint-tests-1.json index 40c0bd58b7..a5e023b96c 100644 --- a/tests/functional/endpoint-rules/elb/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/elb/endpoint-tests-1.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-south-2.api.aws" + "url": "https://elasticloadbalancing.af-south-1.amazonaws.com" } }, "params": { - "Region": "ap-south-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-south-2.amazonaws.com" + "url": "https://elasticloadbalancing.ap-east-1.amazonaws.com" } }, "params": { - "Region": "ap-south-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-south-1.api.aws" + "url": "https://elasticloadbalancing.ap-northeast-1.amazonaws.com" } }, "params": { - "Region": "ap-south-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-south-1.amazonaws.com" + "url": "https://elasticloadbalancing.ap-northeast-2.amazonaws.com" } }, "params": { - "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-south-1.api.aws" + "url": "https://elasticloadbalancing.ap-northeast-3.amazonaws.com" } }, "params": { - "Region": "ap-south-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,255 +74,47 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { 
- "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://elasticloadbalancing-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ca-central-1.api.aws" + "url": "https://elasticloadbalancing.ap-southeast-1.amazonaws.com" } }, "params": { - "Region": "ca-central-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ca-central-1.amazonaws.com" + "url": "https://elasticloadbalancing.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ca-central-1.api.aws" + "url": "https://elasticloadbalancing.ap-southeast-3.amazonaws.com" } }, "params": { - "Region": "ca-central-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,1352 +126,459 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-central-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-central-1.amazonaws.com" } }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-west-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region 
us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-west-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - 
"documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For 
region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region 
eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-north-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-north-1.amazonaws.com" } }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-2.api.aws" - } - }, - "params": { - "Region": "eu-west-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": true - } - 
}, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-2.api.aws" - } - }, - "params": { - "Region": "eu-west-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - 
"documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-2.amazonaws.com" - } - }, - "params": { - 
"Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://elasticloadbalancing.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://elasticloadbalancing-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-south-1.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-1.api.aws" + "url": "https://elasticloadbalancing.eu-west-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-west-2.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-1.api.aws" + "url": "https://elasticloadbalancing.eu-west-3.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-1.amazonaws.com" + "url": "https://elasticloadbalancing.me-south-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-2.api.aws" + "url": "https://elasticloadbalancing.sa-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-2.amazonaws.com" + "url": "https://elasticloadbalancing.us-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-2.api.aws" + "url": "https://elasticloadbalancing-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-2.amazonaws.com" + "url": "https://elasticloadbalancing.us-east-2.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://elasticloadbalancing-fips.us-east-2.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticloadbalancing.us-west-1.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://elasticloadbalancing-fips.us-west-1.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticloadbalancing.us-west-2.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-3.api.aws" + "url": "https://elasticloadbalancing-fips.us-west-2.amazonaws.com" } }, "params": { - "Region": "ap-southeast-3", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 
with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-3.amazonaws.com" + "url": "https://elasticloadbalancing-fips.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-3.api.aws" + "url": "https://elasticloadbalancing.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-3.amazonaws.com" + "url": "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-4.api.aws" + "url": "https://elasticloadbalancing.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseDualStack": true, - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region 
cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.ap-southeast-4.amazonaws.com" + "url": "https://elasticloadbalancing-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseDualStack": false, - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-4.api.aws" + "url": "https://elasticloadbalancing-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseDualStack": true, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-4.amazonaws.com" + "url": "https://elasticloadbalancing.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-1.api.aws" + "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", 
+ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-1.api.aws" + "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-2.api.aws" + "url": "https://elasticloadbalancing-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-2.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-2.api.aws" + "url": "https://elasticloadbalancing.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-2.amazonaws.com" + "url": "https://elasticloadbalancing.us-iso-west-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-iso-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-northwest-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack 
disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticloadbalancing-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticloadbalancing.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-northwest-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticloadbalancing.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -1715,8 +588,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -1728,8 +601,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -1739,34 +612,34 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For 
region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1777,8 +650,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1789,10 +662,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/tests/functional/endpoint-rules/launch-wizard/endpoint-tests-1.json b/tests/functional/endpoint-rules/launch-wizard/endpoint-tests-1.json index 850ed0a8fd..33502c9dc8 100644 --- a/tests/functional/endpoint-rules/launch-wizard/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/launch-wizard/endpoint-tests-1.json @@ -308,17 +308,6 @@ "expect": { "error": "Invalid Configuration: Missing Region" } - }, - { - "documentation": "Partition doesn't support DualStack", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } 
} ], "version": "1.0" diff --git a/tests/functional/endpoint-rules/mailmanager/endpoint-tests-1.json b/tests/functional/endpoint-rules/mailmanager/endpoint-tests-1.json new file mode 100644 index 0000000000..10683c5d59 --- /dev/null +++ b/tests/functional/endpoint-rules/mailmanager/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + 
"UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + 
"UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + 
"Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/backupstorage/endpoint-tests-1.json b/tests/functional/endpoint-rules/pca-connector-scep/endpoint-tests-1.json similarity index 88% rename from tests/functional/endpoint-rules/backupstorage/endpoint-tests-1.json rename to tests/functional/endpoint-rules/pca-connector-scep/endpoint-tests-1.json index ce01d1ddf5..27f0552329 100644 --- a/tests/functional/endpoint-rules/backupstorage/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/pca-connector-scep/endpoint-tests-1.json @@ 
-4,7 +4,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.us-east-1.api.aws" + "url": "https://pca-connector-scep-fips.us-east-1.api.aws" } }, "params": { @@ -17,7 +17,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.us-east-1.amazonaws.com" + "url": "https://pca-connector-scep-fips.us-east-1.amazonaws.com" } }, "params": { @@ -30,7 +30,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-east-1.api.aws" + "url": "https://pca-connector-scep.us-east-1.api.aws" } }, "params": { @@ -43,7 +43,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-east-1.amazonaws.com" + "url": "https://pca-connector-scep.us-east-1.amazonaws.com" } }, "params": { @@ -56,7 +56,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://pca-connector-scep-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -69,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.cn-north-1.amazonaws.com.cn" + "url": "https://pca-connector-scep-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -82,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://pca-connector-scep.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -95,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage.cn-north-1.amazonaws.com.cn" + "url": "https://pca-connector-scep.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -108,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.us-gov-east-1.api.aws" + "url": "https://pca-connector-scep-fips.us-gov-east-1.api.aws" } }, "params": { @@ -121,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.us-gov-east-1.amazonaws.com" + "url": "https://pca-connector-scep-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -134,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-gov-east-1.api.aws" + "url": "https://pca-connector-scep.us-gov-east-1.api.aws" } }, "params": { @@ -147,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-gov-east-1.amazonaws.com" + "url": "https://pca-connector-scep.us-gov-east-1.amazonaws.com" } }, "params": { @@ -171,7 +171,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://pca-connector-scep-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -195,7 +195,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-iso-east-1.c2s.ic.gov" + "url": "https://pca-connector-scep.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -219,7 +219,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://backupstorage-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://pca-connector-scep-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -243,7 +243,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://backupstorage.us-isob-east-1.sc2s.sgov.gov" + "url": "https://pca-connector-scep.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff --git a/tests/functional/endpoint-rules/mobile/endpoint-tests-1.json b/tests/functional/endpoint-rules/qapps/endpoint-tests-1.json similarity index 88% rename from tests/functional/endpoint-rules/mobile/endpoint-tests-1.json rename to tests/functional/endpoint-rules/qapps/endpoint-tests-1.json index 8008ab4d0f..271533dc77 100644 --- a/tests/functional/endpoint-rules/mobile/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/qapps/endpoint-tests-1.json @@ -4,7 +4,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-east-1.api.aws" + "url": "https://data.qapps-fips.us-east-1.api.aws" } }, "params": { @@ -17,7 +17,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-east-1.amazonaws.com" + "url": "https://data.qapps-fips.us-east-1.amazonaws.com" } }, "params": { @@ -30,7 +30,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mobile.us-east-1.api.aws" + "url": "https://data.qapps.us-east-1.api.aws" } }, "params": { @@ -43,7 +43,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile.us-east-1.amazonaws.com" + "url": "https://data.qapps.us-east-1.amazonaws.com" } }, "params": { @@ -56,7 +56,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { 
"endpoint": { - "url": "https://mobile-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://data.qapps-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -69,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile-fips.cn-north-1.amazonaws.com.cn" + "url": "https://data.qapps-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -82,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mobile.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://data.qapps.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -95,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile.cn-north-1.amazonaws.com.cn" + "url": "https://data.qapps.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -108,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.api.aws" + "url": "https://data.qapps-fips.us-gov-east-1.api.aws" } }, "params": { @@ -121,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-gov-east-1.amazonaws.com" + "url": "https://data.qapps-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -134,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mobile.us-gov-east-1.api.aws" + "url": "https://data.qapps.us-gov-east-1.api.aws" } }, "params": { @@ -147,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile.us-gov-east-1.amazonaws.com" + "url": 
"https://data.qapps.us-gov-east-1.amazonaws.com" } }, "params": { @@ -171,7 +171,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://data.qapps-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -195,7 +195,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile.us-iso-east-1.c2s.ic.gov" + "url": "https://data.qapps.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -219,7 +219,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://data.qapps-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -243,7 +243,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mobile.us-isob-east-1.sc2s.sgov.gov" + "url": "https://data.qapps.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff --git a/tests/functional/endpoint-rules/s3/endpoint-tests-1.json b/tests/functional/endpoint-rules/s3/endpoint-tests-1.json index da5ccda22e..b6575d8e02 100644 --- a/tests/functional/endpoint-rules/s3/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/s3/endpoint-tests-1.json @@ -1793,6 +1793,33 @@ "Key": "key" } }, + { + "documentation": "virtual addressing, aws-global region with Copy Source, and Key uses the global endpoint. 
Copy Source and Key parameters should not be used in endpoint evaluation.", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket-name.s3.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket-name", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "CopySource": "/copy/source", + "Key": "key" + } + }, { "documentation": "virtual addressing, aws-global region with fips uses the regional fips endpoint", "expect": { diff --git a/tests/functional/endpoint-rules/honeycode/endpoint-tests-1.json b/tests/functional/endpoint-rules/ssm-quicksetup/endpoint-tests-1.json similarity index 86% rename from tests/functional/endpoint-rules/honeycode/endpoint-tests-1.json rename to tests/functional/endpoint-rules/ssm-quicksetup/endpoint-tests-1.json index bfec834a71..7397c01138 100644 --- a/tests/functional/endpoint-rules/honeycode/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/ssm-quicksetup/endpoint-tests-1.json @@ -1,23 +1,10 @@ { "testCases": [ - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.us-east-1.api.aws" + "url": "https://ssm-quicksetup-fips.us-east-1.api.aws" } }, "params": { @@ -30,7 +17,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.us-east-1.amazonaws.com" + "url": "https://ssm-quicksetup-fips.us-east-1.amazonaws.com" } }, "params": { @@ -43,7 +30,7 @@ "documentation": "For region us-east-1 
with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode.us-east-1.api.aws" + "url": "https://ssm-quicksetup.us-east-1.api.aws" } }, "params": { @@ -56,7 +43,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode.us-east-1.amazonaws.com" + "url": "https://ssm-quicksetup.us-east-1.amazonaws.com" } }, "params": { @@ -69,7 +56,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://ssm-quicksetup-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -82,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.cn-north-1.amazonaws.com.cn" + "url": "https://ssm-quicksetup-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -95,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://ssm-quicksetup.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -108,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode.cn-north-1.amazonaws.com.cn" + "url": "https://ssm-quicksetup.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -121,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.us-gov-east-1.api.aws" + "url": "https://ssm-quicksetup-fips.us-gov-east-1.api.aws" } }, "params": { @@ -134,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://honeycode-fips.us-gov-east-1.amazonaws.com" + "url": "https://ssm-quicksetup-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -147,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://honeycode.us-gov-east-1.api.aws" + "url": "https://ssm-quicksetup.us-gov-east-1.api.aws" } }, "params": { @@ -160,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode.us-gov-east-1.amazonaws.com" + "url": "https://ssm-quicksetup.us-gov-east-1.amazonaws.com" } }, "params": { @@ -184,7 +171,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://ssm-quicksetup-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -208,7 +195,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode.us-iso-east-1.c2s.ic.gov" + "url": "https://ssm-quicksetup.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -232,7 +219,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://ssm-quicksetup-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -256,7 +243,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://honeycode.us-isob-east-1.sc2s.sgov.gov" + "url": "https://ssm-quicksetup.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff --git a/tests/functional/endpoint-rules/taxsettings/endpoint-tests-1.json b/tests/functional/endpoint-rules/taxsettings/endpoint-tests-1.json new file mode 100644 index 0000000000..f3593c11da --- /dev/null +++ 
b/tests/functional/endpoint-rules/taxsettings/endpoint-tests-1.json @@ -0,0 +1,552 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://tax-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://tax-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://tax.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": 
"For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://tax.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax-fips.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 
with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax-fips.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": 
"us-iso-east-1" + } + ] + }, + "url": "https://tax-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://tax.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://tax-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + 
{ + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://tax.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://tax-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://tax.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", + 
"expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://tax-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://tax.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/models/endpoints.json b/tests/functional/models/endpoints.json index 4b9b94a411..681dedc857 100644 --- a/tests/functional/models/endpoints.json +++ b/tests/functional/models/endpoints.json @@ -57,11 +57,6 @@ } }, "services" : { - "a4b" : { - "endpoints" : { - "us-east-1" : { } - } - }, "acm" : { "endpoints" : { "ap-northeast-1" : { }, diff --git a/tests/functional/test_alias.py b/tests/functional/test_alias.py index 750cb933e9..0812c85b0e 100644 --- a/tests/functional/test_alias.py +++ b/tests/functional/test_alias.py @@ -82,6 +82,8 @@ def _can_use_parameter_in_client_call(session, case, use_alias=True): getattr(client, operation)(**params) except ParamValidationError as e: raise AssertionError( - 'Expecting %s to be valid parameter for %s.%s but received ' - '%s.' 
% (case['new_name'], case['service'], case['operation'], e) + 'Expecting {} to be valid parameter for {}.{} but received ' + '{}.'.format( + case['new_name'], case['service'], case['operation'], e + ) ) diff --git a/tests/functional/test_auth_config.py b/tests/functional/test_auth_config.py new file mode 100644 index 0000000000..7fe096d338 --- /dev/null +++ b/tests/functional/test_auth_config.py @@ -0,0 +1,77 @@ +# Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import pytest + +from botocore.session import get_session + +# In the future, a service may have a list of credentials requirements where one +# signature may fail and others may succeed. e.g. a service may want to use bearer +# auth but fall back to sigv4 if a token isn't available. There's currently no way to do +# this in botocore, so this test ensures we handle this gracefully when the need arises. 
+ + +# The dictionary's value here needs to be hashable to be added to the set below; any +# new auth types with multiple requirements should be added in a comma-separated list +AUTH_TYPE_REQUIREMENTS = { + 'aws.auth#sigv4': 'credentials', + 'aws.auth#sigv4a': 'credentials', + 'smithy.api#httpBearerAuth': 'bearer_token', + 'smithy.api#noAuth': 'none', +} + + +def _all_test_cases(): + session = get_session() + loader = session.get_component('data_loader') + + services = loader.list_available_services('service-2') + auth_services = [] + auth_operations = [] + + for service in services: + service_model = session.get_service_model(service) + auth_config = service_model.metadata.get('auth', {}) + if auth_config: + auth_services.append([service, auth_config]) + for operation in service_model.operation_names: + operation_model = service_model.operation_model(operation) + if operation_model.auth: + auth_operations.append([service, operation_model]) + return auth_services, auth_operations + + +AUTH_SERVICES, AUTH_OPERATIONS = _all_test_cases() + + +@pytest.mark.validates_models +@pytest.mark.parametrize("auth_service, auth_config", AUTH_SERVICES) +def test_all_requirements_match_for_service(auth_service, auth_config): + # Validates that all service-level signature types have the same requirements + message = f'Found mixed signer requirements for service: {auth_service}' + assert_all_requirements_match(auth_config, message) + + +@pytest.mark.validates_models +@pytest.mark.parametrize("auth_service, operation_model", AUTH_OPERATIONS) +def test_all_requirements_match_for_operation(auth_service, operation_model): + # Validates that all operation-level signature types have the same requirements + message = f'Found mixed signer requirements for operation: {auth_service}.{operation_model.name}' + auth_config = operation_model.auth + assert_all_requirements_match(auth_config, message) + + +def assert_all_requirements_match(auth_config, message): + auth_requirements = set( + 
AUTH_TYPE_REQUIREMENTS[auth_type] for auth_type in auth_config + ) + assert len(auth_requirements) == 1 diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py index ddd5f2beea..a6ba1f8b4b 100644 --- a/tests/functional/test_credentials.py +++ b/tests/functional/test_credentials.py @@ -127,9 +127,8 @@ def _run_in_thread(collected): max_calls_allowed = math.ceil((end - start) / 2.0) + 1 self.assertTrue( creds.refresh_counter <= max_calls_allowed, - "Too many cred refreshes, max: %s, actual: %s, " - "time_delta: %.4f" - % (max_calls_allowed, creds.refresh_counter, (end - start)), + f"Too many cred refreshes, max: {max_calls_allowed}, actual: " + f"{creds.refresh_counter}, time_delta: {end - start:.4f}", ) def test_no_race_for_immediate_advisory_expiration(self): @@ -206,9 +205,9 @@ def create_assume_role_response(self, credentials, expiration=None): def create_random_credentials(self): return Credentials( - 'fake-%s' % random_chars(15), - 'fake-%s' % random_chars(35), - 'fake-%s' % random_chars(45), + f'fake-{random_chars(15)}', + f'fake-{random_chars(35)}', + f'fake-{random_chars(45)}', ) def assert_creds_equal(self, c1, c2): @@ -241,9 +240,7 @@ def setUp(self): credential_process = os.path.join( current_dir, 'utils', 'credentialprocess.py' ) - self.credential_process = '{} {}'.format( - sys.executable, credential_process - ) + self.credential_process = f'{sys.executable} {credential_process}' def mock_provider(self, provider_cls): mock_instance = mock.Mock(spec=provider_cls) @@ -487,7 +484,7 @@ def test_process_source_profile(self): 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' - 'credential_process = %s\n' % self.credential_process + f'credential_process = {self.credential_process}\n' ) self.write_config(config) @@ -520,7 +517,7 @@ def test_web_identity_source_profile(self): 'source_profile = B\n' '[profile B]\n' 'role_arn = arn:aws:iam::123456789:role/RoleB\n' - 'web_identity_token_file = 
%s\n' % token_path + f'web_identity_token_file = {token_path}\n' ) self.write_config(config) @@ -561,7 +558,7 @@ def test_web_identity_source_profile_ignores_env_vars(self): 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'source_profile = B\n' '[profile B]\n' - 'web_identity_token_file = %s\n' % token_path + f'web_identity_token_file = {token_path}\n' ) self.write_config(config) @@ -803,8 +800,8 @@ def test_assume_role(self): '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'role_session_name = sname\n' - 'web_identity_token_file = %s\n' - ) % self.token_file + f'web_identity_token_file = {self.token_file}\n' + ) self.write_config(config) expected_params = { 'RoleArn': 'arn:aws:iam::123456789:role/RoleA', @@ -832,8 +829,8 @@ def test_assume_role_env_vars_do_not_take_precedence(self): '[profile A]\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' 'role_session_name = aname\n' - 'web_identity_token_file = %s\n' - ) % self.token_file + f'web_identity_token_file = {self.token_file}\n' + ) self.write_config(config) different_token = os.path.join(self.tempdir, str(uuid.uuid4())) @@ -856,9 +853,7 @@ def setUp(self): credential_process = os.path.join( current_dir, 'utils', 'credentialprocess.py' ) - self.credential_process = '{} {}'.format( - sys.executable, credential_process - ) + self.credential_process = f'{sys.executable} {credential_process}' self.environ = os.environ.copy() self.environ_patch = mock.patch('os.environ', self.environ) self.environ_patch.start() @@ -921,8 +916,8 @@ def add_assume_role_with_web_identity_http_response(self, stubber): def _get_assume_role_body(self, method_name): expiration = self.some_future_time() body = ( - '<{method_name}Response>' - ' <{method_name}Result>' + f'<{method_name}Response>' + f' <{method_name}Result>' ' ' ' arn:aws:sts::0123456:user' ' AKID:mysession-1567020004' @@ -931,11 +926,11 @@ def _get_assume_role_body(self, method_name): ' AccessKey' ' SecretKey' ' SessionToken' - ' {expiration}' + f' 
{expiration}' ' ' - ' ' - '' - ).format(method_name=method_name, expiration=expiration) + f' ' + f'' + ) return body.encode('utf-8') def make_stubbed_client_call_to_region(self, session, stubber, region): @@ -976,11 +971,11 @@ def test_assume_role_web_identity_uses_same_region_as_client(self): '[profile A]\n' 'sts_regional_endpoints = regional\n' 'role_arn = arn:aws:iam::123456789:role/RoleA\n' - 'web_identity_token_file = %s\n' + f'web_identity_token_file = {token_file}\n' 'source_profile = B\n\n' '[profile B]\n' 'aws_access_key_id = abc123\n' - 'aws_secret_access_key = def456\n' % token_file + 'aws_secret_access_key = def456\n' ) self.write_config(config) # Make an arbitrary client and API call as we are really only diff --git a/tests/functional/test_discovery.py b/tests/functional/test_discovery.py index c6c8b0383d..8842d5b086 100644 --- a/tests/functional/test_discovery.py +++ b/tests/functional/test_discovery.py @@ -57,7 +57,7 @@ def add_describe_endpoints_response(self, stubber, discovered_endpoint): def set_endpoint_discovery_config_file(self, fileobj, config_val): fileobj.write( - '[default]\n' 'endpoint_discovery_enabled=%s\n' % config_val + '[default]\n' f'endpoint_discovery_enabled={config_val}\n' ) fileobj.flush() self.environ['AWS_CONFIG_FILE'] = fileobj.name diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py index 9d0ec1730a..a9839ad414 100644 --- a/tests/functional/test_event_alias.py +++ b/tests/functional/test_event_alias.py @@ -8,10 +8,6 @@ SERVICES = { "acm": {"endpoint_prefix": "acm", "service_id": "acm"}, "acm-pca": {"endpoint_prefix": "acm-pca", "service_id": "acm-pca"}, - "alexaforbusiness": { - "endpoint_prefix": "a4b", - "service_id": "alexa-for-business", - }, "apigateway": { "endpoint_prefix": "apigateway", "service_id": "api-gateway", @@ -252,7 +248,6 @@ "service_id": "marketplace-metering", }, "mgh": {"endpoint_prefix": "mgh", "service_id": "migration-hub"}, - "mobile": {"endpoint_prefix": "mobile", 
"service_id": "mobile"}, "mq": {"endpoint_prefix": "mq", "service_id": "mq"}, "mturk": {"endpoint_prefix": "mturk-requester", "service_id": "mturk"}, "neptune": {"service_id": "neptune"}, @@ -386,7 +381,7 @@ def _hook(**kwargs): hook_calls.append(kwargs['event_name']) session = _get_session() - session.register('creating-client-class.%s' % event_part, _hook) + session.register(f'creating-client-class.{event_part}', _hook) session.create_client(client_name) assert len(hook_calls) == 1 diff --git a/tests/functional/test_h2_required.py b/tests/functional/test_h2_required.py index 563fc81e8f..8754cf2744 100644 --- a/tests/functional/test_h2_required.py +++ b/tests/functional/test_h2_required.py @@ -50,18 +50,18 @@ def _all_test_cases(): H2_SERVICES, H2_OPERATIONS = _all_test_cases() +@pytest.mark.validates_models @pytest.mark.parametrize("h2_service", H2_SERVICES) def test_all_uses_of_h2_are_known(h2_service): # Validates that a service that requires HTTP 2 for all operations is known - message = 'Found unknown HTTP 2 service: %s' % h2_service + message = f'Found unknown HTTP 2 service: {h2_service}' assert _KNOWN_SERVICES.get(h2_service) is _H2_REQUIRED, message +@pytest.mark.validates_models @pytest.mark.parametrize("h2_service, operation", H2_OPERATIONS) def test_all_h2_operations_are_known(h2_service, operation): # Validates that an operation that requires HTTP 2 is known known_operations = _KNOWN_SERVICES.get(h2_service, []) - message = 'Found unknown HTTP 2 operation: {}.{}'.format( - h2_service, operation - ) + message = f'Found unknown HTTP 2 operation: {h2_service}.{operation}' assert operation in known_operations, message diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py index 418a029da4..1d2155b370 100644 --- a/tests/functional/test_paginator_config.py +++ b/tests/functional/test_paginator_config.py @@ -32,11 +32,6 @@ # to reference all the extra output keys. 
Nothing should ever be added to this # list, it represents all the current released paginators that fail this test. KNOWN_EXTRA_OUTPUT_KEYS = [ - 'alexaforbusiness.SearchUsers.TotalCount', - 'alexaforbusiness.SearchProfiles.TotalCount', - 'alexaforbusiness.SearchSkillGroups.TotalCount', - 'alexaforbusiness.SearchDevices.TotalCount', - 'alexaforbusiness.SearchRooms.TotalCount', 'apigateway.GetApiKeys.warnings', 'apigateway.GetUsage.usagePlanId', 'apigateway.GetUsage.startDate', @@ -167,8 +162,7 @@ def _validate_known_pagination_keys(page_config): for key in page_config: if key not in KNOWN_PAGE_KEYS: raise AssertionError( - "Unknown key '%s' in pagination config: %s" - % (key, page_config) + f"Unknown key '{key}' in pagination config: {page_config}" ) @@ -176,7 +170,7 @@ def _valiate_result_key_exists(page_config): if 'result_key' not in page_config: raise AssertionError( "Required key 'result_key' is missing " - "from pagination config: %s" % page_config + f"from pagination config: {page_config}" ) @@ -184,7 +178,7 @@ def _validate_referenced_operation_exists(operation_name, service_model): if operation_name not in service_model.operation_names: raise AssertionError( "Pagination config refers to operation that " - "does not exist: %s" % operation_name + f"does not exist: {operation_name}" ) @@ -194,7 +188,7 @@ def _validate_operation_has_output(operation_name, service_model): if output is None or not output.members: raise AssertionError( "Pagination config refers to operation " - "that does not have any output: %s" % operation_name + f"that does not have any output: {operation_name}" ) @@ -208,17 +202,16 @@ def _validate_input_keys_match(operation_name, page_config, service_model): for token in input_tokens: if token not in valid_input_names: raise AssertionError( - "input_token '%s' refers to a non existent " - "input member for operation: %s" % (token, operation_name) + f"input_token '{token}' refers to a non existent " + f"input member for operation: 
{operation_name}" ) if 'limit_key' in page_config: limit_key = page_config['limit_key'] if limit_key not in valid_input_names: raise AssertionError( - "limit_key '%s' refers to a non existent " - "input member for operation: %s, valid keys: " - "%s" - % ( + "limit_key '{}' refers to a non existent " + "input member for operation: {}, valid keys: " + "{}".format( limit_key, operation_name, ', '.join(list(valid_input_names)), @@ -242,24 +235,21 @@ def _validate_output_keys_match(operation_name, page_config, service_model): else: if output_key not in output_members: raise AssertionError( - "Pagination key '%s' refers to an output " - "member that does not exist: %s" % (key_name, output_key) + f"Pagination key '{key_name}' refers to an output " + f"member that does not exist: {output_key}" ) output_members.remove(output_key) for member in list(output_members): - key = "{}.{}.{}".format( - service_model.service_name, operation_name, member - ) + key = f"{service_model.service_name}.{operation_name}.{member}" if key in KNOWN_EXTRA_OUTPUT_KEYS: output_members.remove(member) if output_members: raise AssertionError( "There are member names in the output shape of " - "%s that are not accounted for in the pagination " - "config for service %s: %s" - % ( + "{} that are not accounted for in the pagination " + "config for service {}: {}".format( operation_name, service_model.service_name, ', '.join(output_members), @@ -279,7 +269,7 @@ def _validate_jmespath_compiles(expression): except JMESPathError as e: raise AssertionError( "Invalid JMESPath expression used " - "in pagination config: %s\nerror: %s" % (expression, e) + f"in pagination config: {expression}\nerror: {e}" ) diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py index 74aaeb7738..11a882f91f 100644 --- a/tests/functional/test_regions.py +++ b/tests/functional/test_regions.py @@ -447,7 +447,7 @@ def test_single_service_region_endpoint( resolver = 
patched_session._get_internal_component('endpoint_resolver') bridge = ClientEndpointBridge(resolver, None, None) result = bridge.resolve(service_name, region_name) - expected = 'https://%s' % expected_endpoint + expected = f'https://{expected_endpoint}' assert result['endpoint_url'] == expected diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py index 27dafb0909..da7460313c 100644 --- a/tests/functional/test_retry.py +++ b/tests/functional/test_retry.py @@ -44,7 +44,7 @@ def assert_will_retry_n_times( for _ in range(num_responses): http_stubber.add_response(status=status, body=body) with self.assertRaisesRegex( - ClientError, 'reached max retries: %s' % num_retries + ClientError, f'reached max retries: {num_retries}' ): yield self.assertEqual(len(http_stubber.requests), num_responses) diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index e4f99ac97f..0c04f858e0 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -1363,7 +1363,7 @@ def test_endpoint_redirection(self): self.assert_signing_region(request, region) expected_endpoint = ( "endpoint-io.a1c1d5c7.s3-object-lambda." - "%s.amazonaws.com" % region + f"{region}.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) @@ -3596,7 +3596,7 @@ def _verify_presigned_url_addressing( # We're not trying to verify the params for URL presigning, # those are tested elsewhere. We just care about the hostname/path. 
parts = urlsplit(url) - actual = "%s://%s%s" % parts[:3] + actual = "{}://{}{}".format(*parts[:3]) assert actual == expected_url diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py index 1aff27a5ed..4ac00123d4 100644 --- a/tests/functional/test_s3_control_redirects.py +++ b/tests/functional/test_s3_control_redirects.py @@ -375,12 +375,9 @@ def _assert_test_case(test_case, client, stubber): exception_cls = getattr(exceptions, assertions['exception']) if exception_raised is None: raise RuntimeError( - 'Expected exception "%s" was not raised' % exception_cls + f'Expected exception "{exception_cls}" was not raised' ) - error_msg = ('Expected exception "%s", got "%s"') % ( - exception_cls, - type(exception_raised), - ) + error_msg = f'Expected exception "{exception_cls}", got "{type(exception_raised)}"' assert isinstance(exception_raised, exception_cls), error_msg else: assert len(stubber.requests) == 1 diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py index f7ee511341..41086f56f7 100644 --- a/tests/functional/test_six_imports.py +++ b/tests/functional/test_six_imports.py @@ -36,20 +36,18 @@ def visit_Import(self, node): if getattr(alias, 'name', '') == 'six': line = self._get_line_content(self.filename, node.lineno) raise AssertionError( - "A bare 'import six' was found in %s:\n" - "\n%s: %s\n" + f"A bare 'import six' was found in {self.filename}:\n" + f"\n{node.lineno}: {line}\n" "Please use 'from botocore.compat import six' instead" - % (self.filename, node.lineno, line) ) def visit_ImportFrom(self, node): if node.module == 'six': line = self._get_line_content(self.filename, node.lineno) raise AssertionError( - "A bare 'from six import ...' was found in %s:\n" - "\n%s:%s\n" + f"A bare 'from six import ...' 
was found in {self.filename}:\n" + f"\n{node.lineno}:{line}\n" "Please use 'from botocore.compat import six' instead" - % (self.filename, node.lineno, line) ) def _get_line_content(self, filename, lineno): diff --git a/tests/functional/test_six_threading.py b/tests/functional/test_six_threading.py index 7321e5cbd6..c4460f7205 100644 --- a/tests/functional/test_six_threading.py +++ b/tests/functional/test_six_threading.py @@ -1,6 +1,7 @@ """ Regression test for six issue #98 (https://github.com/benjaminp/six/issues/98) """ + import sys import threading import time diff --git a/tests/functional/test_sts.py b/tests/functional/test_sts.py index 2040fb3017..42fac96711 100644 --- a/tests/functional/test_sts.py +++ b/tests/functional/test_sts.py @@ -67,7 +67,7 @@ def create_sts_client( ) def set_sts_regional_for_config_file(self, fileobj, config_val): - fileobj.write('[default]\n' 'sts_regional_endpoints=%s\n' % config_val) + fileobj.write('[default]\n' f'sts_regional_endpoints={config_val}\n') fileobj.flush() self.environ['AWS_CONFIG_FILE'] = fileobj.name diff --git a/tests/functional/test_useragent.py b/tests/functional/test_useragent.py index d69451253c..79290459ab 100644 --- a/tests/functional/test_useragent.py +++ b/tests/functional/test_useragent.py @@ -285,10 +285,7 @@ def test_s3transfer_user_agent(patched_session): def test_chalice_user_agent(patched_session): # emulate behavior from chalice's cli.factory._add_chalice_user_agent - suffix = '{}/{}'.format( - patched_session.user_agent_name, - patched_session.user_agent_version, - ) + suffix = f'{patched_session.user_agent_name}/{patched_session.user_agent_version}' patched_session.user_agent_name = 'aws-chalice' patched_session.user_agent_version = '0.1.2' patched_session.user_agent_extra = suffix diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index e7c8e9b876..fc1ab619fb 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py 
@@ -118,23 +118,20 @@ def _lint_single_waiter(client, waiter_name, service_model): # Needs to reference an existing operation name. if operation_name not in service_model.operation_names: raise AssertionError( - "Waiter config references unknown " - "operation: %s" % operation_name + "Waiter config references unknown " f"operation: {operation_name}" ) # Needs to have at least one acceptor. if not waiter.config.acceptors: raise AssertionError( "Waiter config must have at least " - "one acceptor state: %s" % waiter.name + f"one acceptor state: {waiter.name}" ) op_model = service_model.operation_model(operation_name) for acceptor in acceptors: _validate_acceptor(acceptor, op_model, waiter.name) if not waiter.name.isalnum(): - raise AssertionError( - "Waiter name %s is not alphanumeric." % waiter_name - ) + raise AssertionError(f"Waiter name {waiter_name} is not alphanumeric.") def _validate_schema(validator, waiter_json): @@ -151,10 +148,7 @@ def _validate_acceptor(acceptor, op_model, waiter_name): output_shape = op_model.output_shape assert ( output_shape is not None - ), "Waiter '{}' has JMESPath expression with no output shape: {}".format( - waiter_name, - op_model, - ) + ), f"Waiter '{waiter_name}' has JMESPath expression with no output shape: {op_model}" # We want to check if the JMESPath expression makes sense. # To do this, we'll generate sample output and evaluate the # JMESPath expression against the output. We'll then diff --git a/tests/functional/utils/credentialprocess.py b/tests/functional/utils/credentialprocess.py index 6529c7f493..12c8068cd3 100644 --- a/tests/functional/utils/credentialprocess.py +++ b/tests/functional/utils/credentialprocess.py @@ -11,6 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
"""This is a dummy implementation of a credential provider process.""" + import argparse import json diff --git a/tests/integration/test_client_http.py b/tests/integration/test_client_http.py index c2ee605552..7a2b3d94b6 100644 --- a/tests/integration/test_client_http.py +++ b/tests/integration/test_client_http.py @@ -23,7 +23,7 @@ class TestClientHTTPBehavior(unittest.TestCase): def setUp(self): self.port = unused_port() - self.localhost = 'http://localhost:%s/' % self.port + self.localhost = f'http://localhost:{self.port}/' self.session = botocore.session.get_session() # We need to set fake credentials to ensure credentials aren't searched # for which might make additional API calls (assume role, etc). @@ -31,7 +31,7 @@ def setUp(self): @unittest.skip('Test has suddenly become extremely flakey.') def test_can_proxy_https_request_with_auth(self): - proxy_url = 'http://user:pass@localhost:%s/' % self.port + proxy_url = f'http://user:pass@localhost:{self.port}/' config = Config(proxies={'https': proxy_url}, region_name='us-west-1') client = self.session.create_client('ec2', config=config) @@ -51,7 +51,7 @@ def validate_auth(self): @unittest.skip('Proxy cannot connect to service when run in CodeBuild.') def test_proxy_request_includes_host_header(self): - proxy_url = 'http://user:pass@localhost:%s/' % self.port + proxy_url = f'http://user:pass@localhost:{self.port}/' config = Config( proxies={'https': proxy_url}, proxies_config={'proxy_use_forwarding_for_https': True}, diff --git a/tests/integration/test_cloudformation.py b/tests/integration/test_cloudformation.py index fe6c9a7b4e..fdc354de17 100644 --- a/tests/integration/test_cloudformation.py +++ b/tests/integration/test_cloudformation.py @@ -25,7 +25,7 @@ def test_handles_errors_with_template_body(self): # it handles the case when a stack does not exist. 
with self.assertRaises(ClientError): self.client.get_template( - StackName='does-not-exist-%s' % random_chars(10) + StackName=f'does-not-exist-{random_chars(10)}' ) diff --git a/tests/integration/test_cognito_identity.py b/tests/integration/test_cognito_identity.py index 046e87e76a..bce7b2d7b7 100644 --- a/tests/integration/test_cognito_identity.py +++ b/tests/integration/test_cognito_identity.py @@ -22,7 +22,7 @@ def setUp(self): ) def test_can_create_and_delete_identity_pool(self): - pool_name = 'test%s' % random_chars(10) + pool_name = f'test{random_chars(10)}' response = self.client.create_identity_pool( IdentityPoolName=pool_name, AllowUnauthenticatedIdentities=True ) diff --git a/tests/integration/test_credentials.py b/tests/integration/test_credentials.py index 239c206a70..d49c266e2e 100644 --- a/tests/integration/test_credentials.py +++ b/tests/integration/test_credentials.py @@ -157,7 +157,7 @@ def setUp(self): "Statement": [ { "Effect": "Allow", - "Principal": {"AWS": "arn:aws:iam::%s:root" % account_id}, + "Principal": {"AWS": f"arn:aws:iam::{account_id}:root"}, "Action": "sts:AssumeRole", } ], @@ -254,7 +254,7 @@ def _wait_for_assume_role( else: raise - raise Exception("Unable to assume role %s" % role_arn) + raise Exception(f"Unable to assume role {role_arn}") def create_assume_policy(self, role_arn): policy_document = { diff --git a/tests/integration/test_elastictranscoder.py b/tests/integration/test_elastictranscoder.py index 4d58056d60..a72e723621 100644 --- a/tests/integration/test_elastictranscoder.py +++ b/tests/integration/test_elastictranscoder.py @@ -38,7 +38,7 @@ def setUp(self): self.iam_client = self.session.create_client('iam', 'us-east-1') def create_bucket(self): - bucket_name = 'ets-bucket-1-%s' % random_chars(50) + bucket_name = f'ets-bucket-1-{random_chars(50)}' self.s3_client.create_bucket(Bucket=bucket_name) waiter = self.s3_client.get_waiter('bucket_exists') waiter.wait(Bucket=bucket_name) @@ -46,7 +46,7 @@ def create_bucket(self): 
return bucket_name def create_iam_role(self): - role_name = 'ets-role-name-1-%s' % random_chars(10) + role_name = f'ets-role-name-1-{random_chars(10)}' parsed = self.iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=DEFAULT_ROLE_POLICY ) @@ -68,7 +68,7 @@ def test_create_pipeline(self): input_bucket = self.create_bucket() output_bucket = self.create_bucket() role = self.create_iam_role() - pipeline_name = 'botocore-test-create-%s' % random_chars(10) + pipeline_name = f'botocore-test-create-{random_chars(10)}' parsed = self.client.create_pipeline( InputBucket=input_bucket, diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 88ba79a481..712f9f357c 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -288,9 +288,7 @@ def assert_num_uploads_found( # Sleep and try again. time.sleep(2) self.fail( - "Expected to see {} uploads, instead saw: {}".format( - num_uploads, amount_seen - ) + f"Expected to see {num_uploads} uploads, instead saw: {amount_seen}" ) def create_client(self): @@ -359,7 +357,7 @@ def test_can_delete_urlencoded_object(self): @pytest.mark.slow def test_can_paginate(self): for i in range(5): - key_name = 'key%s' % i + key_name = f'key{i}' self.create_object(key_name) # Eventual consistency. time.sleep(3) @@ -373,7 +371,7 @@ def test_can_paginate(self): @pytest.mark.slow def test_can_paginate_with_page_size(self): for i in range(5): - key_name = 'key%s' % i + key_name = f'key{i}' self.create_object(key_name) # Eventual consistency. 
time.sleep(3) @@ -392,7 +390,7 @@ def test_result_key_iters(self): for i in range(5): key_name = f'key/{i}/{i}' self.create_object(key_name) - key_name2 = 'key/%s' % i + key_name2 = f'key/{i}' self.create_object(key_name2) time.sleep(3) paginator = self.client.get_paginator('list_objects') @@ -576,7 +574,7 @@ def test_thread_safe_auth(self): threads = [] for i in range(10): t = threading.Thread( - target=self.create_object_catch_exceptions, args=('foo%s' % i,) + target=self.create_object_catch_exceptions, args=(f'foo{i}',) ) t.daemon = True threads.append(t) @@ -587,13 +585,12 @@ def test_thread_safe_auth(self): self.assertEqual( self.caught_exceptions, [], - "Unexpectedly caught exceptions: %s" % self.caught_exceptions, + f"Unexpectedly caught exceptions: {self.caught_exceptions}", ) self.assertEqual( len(set(self.auth_paths)), 10, - "Expected 10 unique auth paths, instead received: %s" - % (self.auth_paths), + f"Expected 10 unique auth paths, instead received: {self.auth_paths}", ) def test_non_normalized_key_paths(self): @@ -714,12 +711,10 @@ def test_presign_sigv2(self): ) self.assertTrue( presigned_url.startswith( - 'https://{}.s3.amazonaws.com/{}'.format( - self.bucket_name, self.key - ) + f'https://{self.bucket_name}.s3.amazonaws.com/{self.key}' ), "Host was suppose to use DNS style, instead " - "got: %s" % presigned_url, + f"got: {presigned_url}", ) # Try to retrieve the object using the presigned url. self.assertEqual(http_get(presigned_url).data, b'foo') @@ -750,12 +745,10 @@ def test_presign_sigv4(self): ) self.assertTrue( presigned_url.startswith( - 'https://{}.s3.amazonaws.com/{}'.format( - self.bucket_name, self.key - ) + f'https://{self.bucket_name}.s3.amazonaws.com/{self.key}' ), "Host was suppose to be the us-east-1 endpoint, instead " - "got: %s" % presigned_url, + f"got: {presigned_url}", ) # Try to retrieve the object using the presigned url. 
self.assertEqual(http_get(presigned_url).data, b'foo') @@ -787,10 +780,11 @@ def test_presign_post_sigv2(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.amazonaws.com' % self.bucket_name + f'https://{self.bucket_name}.s3.amazonaws.com' + ), + "Host was suppose to use DNS style, instead " "got: {}".format( + post_args['url'] ), - "Host was suppose to use DNS style, instead " - "got: %s" % post_args['url'], ) # Try to retrieve the object using the presigned url. @@ -824,10 +818,10 @@ def test_presign_post_sigv4(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.amazonaws.com/' % self.bucket_name + f'https://{self.bucket_name}.s3.amazonaws.com/' ), "Host was suppose to use us-east-1 endpoint, instead " - "got: %s" % post_args['url'], + "got: {}".format(post_args['url']), ) r = http_post(post_args['url'], data=post_args['fields'], files=files) @@ -854,12 +848,10 @@ def test_presign_sigv2(self): ) self.assertTrue( presigned_url.startswith( - 'https://{}.s3.amazonaws.com/{}'.format( - self.bucket_name, self.key - ) + f'https://{self.bucket_name}.s3.amazonaws.com/{self.key}' ), "Host was suppose to use DNS style, instead " - "got: %s" % presigned_url, + f"got: {presigned_url}", ) # Try to retrieve the object using the presigned url. self.assertEqual(http_get(presigned_url).data, b'foo') @@ -882,12 +874,10 @@ def test_presign_sigv4(self): self.assertTrue( presigned_url.startswith( - 'https://s3.us-west-2.amazonaws.com/{}/{}'.format( - self.bucket_name, self.key - ) + f'https://s3.us-west-2.amazonaws.com/{self.bucket_name}/{self.key}' ), "Host was suppose to be the us-west-2 endpoint, instead " - "got: %s" % presigned_url, + f"got: {presigned_url}", ) # Try to retrieve the object using the presigned url. 
self.assertEqual(http_get(presigned_url).data, b'foo') @@ -919,10 +909,11 @@ def test_presign_post_sigv2(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.amazonaws.com' % self.bucket_name + f'https://{self.bucket_name}.s3.amazonaws.com' + ), + "Host was suppose to use DNS style, instead " "got: {}".format( + post_args['url'] ), - "Host was suppose to use DNS style, instead " - "got: %s" % post_args['url'], ) r = http_post(post_args['url'], data=post_args['fields'], files=files) @@ -955,10 +946,11 @@ def test_presign_post_sigv4(self): # Make sure the correct endpoint is being used self.assertTrue( post_args['url'].startswith( - 'https://%s.s3.amazonaws.com/' % self.bucket_name + f'https://{self.bucket_name}.s3.amazonaws.com/' + ), + "Host was suppose to use DNS style, instead " "got: {}".format( + post_args['url'] ), - "Host was suppose to use DNS style, instead " - "got: %s" % post_args['url'], ) r = http_post(post_args['url'], data=post_args['fields'], files=files) @@ -1421,7 +1413,7 @@ def test_redirects_head_object(self): ) self.assertEqual(response.get('ContentLength'), len(key)) except ClientError as e: - self.fail("S3 Client failed to redirect Head Object: %s" % e) + self.fail(f"S3 Client failed to redirect Head Object: {e}") class TestBucketWithVersions(BaseS3ClientTest): diff --git a/tests/integration/test_session.py b/tests/integration/test_session.py index 924dfd9cf0..95d2ec1626 100644 --- a/tests/integration/test_session.py +++ b/tests/integration/test_session.py @@ -30,7 +30,7 @@ def test_can_change_timestamp_with_clients(self): dates = [bucket['CreationDate'] for bucket in parsed['Buckets']] self.assertTrue( all(isinstance(date, str) for date in dates), - "Expected all str types but instead got: %s" % dates, + f"Expected all str types but instead got: {dates}", ) def test_maps_service_name_when_overriden(self): diff --git a/tests/integration/test_smoke.py 
b/tests/integration/test_smoke.py index 6c0d7139b7..e31a5c9287 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -10,6 +10,7 @@ to use and all the services in SMOKE_TESTS/ERROR_TESTS will be tested. """ + import logging import os import warnings @@ -79,7 +80,7 @@ 'kms': {'ListKeys': {}}, 'lambda': {'ListFunctions': {}}, 'logs': {'DescribeLogGroups': {}}, - 'opsworks': {'DescribeStacks': {}}, + # 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, @@ -174,7 +175,7 @@ 'kinesis': {'DescribeStream': {'StreamName': 'fake'}}, 'kms': {'GetKeyPolicy': {'KeyId': 'fake', 'PolicyName': 'fake'}}, 'lambda': {'Invoke': {'FunctionName': 'fake'}}, - 'opsworks': {'DescribeLayers': {'StackId': 'fake'}}, + # 'opsworks': {'DescribeLayers': {'StackId': 'fake'}}, 'rds': {'DescribeDBInstances': {'DBInstanceIdentifier': 'fake'}}, 'redshift': {'DescribeClusters': {'ClusterIdentifier': 'fake'}}, 'route53': {'GetHostedZone': {'Id': 'fake'}}, @@ -317,7 +318,7 @@ def test_client_can_retry_request_properly( except ClientError as e: assert False, ( 'Request was not retried properly, ' - 'received error:\n%s' % pformat(e) + f'received error:\n{pformat(e)}' ) # Ensure we used the stubber as we're not using it in strict mode assert len(http_stubber.responses) == 0, 'Stubber was not used!' 
diff --git a/tests/integration/test_waiters.py b/tests/integration/test_waiters.py index 5631d9184b..a627cec0de 100644 --- a/tests/integration/test_waiters.py +++ b/tests/integration/test_waiters.py @@ -24,7 +24,7 @@ def setUp(self): self.client = self.session.create_client('dynamodb', 'us-west-2') def test_create_table_and_wait(self): - table_name = 'botocoretest-%s' % random_chars(10) + table_name = f'botocoretest-{random_chars(10)}' self.client.create_table( TableName=table_name, ProvisionedThroughput={ diff --git a/tests/unit/auth/test_auth_trait.py b/tests/unit/auth/test_auth_trait.py new file mode 100644 index 0000000000..c1209a576c --- /dev/null +++ b/tests/unit/auth/test_auth_trait.py @@ -0,0 +1,42 @@ +# Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +from botocore.auth import BaseSigner, resolve_auth_type +from botocore.exceptions import ( + UnknownSignatureVersionError, + UnsupportedSignatureVersionError, +) +from tests import mock, unittest + + +class TestAuthTraitResolution(unittest.TestCase): + def test_auth_resolves_first_available(self): + auth = ['aws.auth#foo', 'aws.auth#bar'] + # Don't declare a signer for "foo" + auth_types = {'bar': mock.Mock(spec=BaseSigner)} + auth_type_conversions = {'aws.auth#foo': 'foo', 'aws.auth#bar': 'bar'} + + with mock.patch('botocore.auth.AUTH_TYPE_MAPS', auth_types): + with mock.patch( + 'botocore.auth.AUTH_TYPE_TO_SIGNATURE_VERSION', + auth_type_conversions, + ): + assert resolve_auth_type(auth) == 'bar' + + def test_invalid_auth_type_error(self): + with self.assertRaises(UnknownSignatureVersionError): + resolve_auth_type(['aws.auth#invalidAuth']) + + def test_no_known_auth_type(self): + with self.assertRaises(UnsupportedSignatureVersionError): + resolve_auth_type([]) diff --git a/tests/unit/auth/test_signers.py b/tests/unit/auth/test_signers.py index 9e92654228..d1b301e35d 100644 --- a/tests/unit/auth/test_signers.py +++ b/tests/unit/auth/test_signers.py @@ -115,10 +115,10 @@ def test_bucket_operations(self): 'object-lock', ) for operation in operations: - url = '/quotes?%s' % operation + url = f'/quotes?{operation}' split = urlsplit(url) cr = self.hmacv1.canonical_resource(split) - self.assertEqual(cr, '/quotes?%s' % operation) + self.assertEqual(cr, f'/quotes?{operation}') def test_sign_with_token(self): credentials = botocore.credentials.Credentials( @@ -357,7 +357,7 @@ def test_signature_is_not_normalized(self): def test_query_string_params_in_urls(self): if not hasattr(self.AuthClass, 'canonical_query_string'): raise unittest.SkipTest( - '%s does not expose interim steps' % self.AuthClass.__name__ + f'{self.AuthClass.__name__} does not expose interim steps' ) request = AWSRequest() @@ -704,9 +704,7 @@ def setUp(self): self.request = AWSRequest() self.bucket = 
'mybucket' self.key = 'myobject' - self.path = 'https://s3.amazonaws.com/{}/{}'.format( - self.bucket, self.key - ) + self.path = f'https://s3.amazonaws.com/{self.bucket}/{self.key}' self.request.url = self.path self.request.method = 'GET' @@ -986,9 +984,9 @@ def test_presign_content_type_form_encoded_not_signed(self): request = AWSRequest() request.method = 'GET' request.url = 'https://myservice.us-east-1.amazonaws.com/' - request.headers[ - 'Content-Type' - ] = 'application/x-www-form-urlencoded; charset=utf-8' + request.headers['Content-Type'] = ( + 'application/x-www-form-urlencoded; charset=utf-8' + ) self.auth.add_auth(request) query_string = self.get_parsed_query_string(request) signed_headers = query_string.get('X-Amz-SignedHeaders') @@ -1022,7 +1020,7 @@ def setUp(self): } self.request = AWSRequest() - self.request.url = 'https://s3.amazonaws.com/%s' % self.bucket + self.request.url = f'https://s3.amazonaws.com/{self.bucket}' self.request.method = 'POST' self.request.context['s3-presign-post-fields'] = self.fields @@ -1075,7 +1073,7 @@ def test_presign_post_with_security_token(self): def test_empty_fields_and_policy(self): self.request = AWSRequest() - self.request.url = 'https://s3.amazonaws.com/%s' % self.bucket + self.request.url = f'https://s3.amazonaws.com/{self.bucket}' self.request.method = 'POST' self.auth.add_auth(self.request) @@ -1152,7 +1150,7 @@ def test_presign_post_with_security_token(self): def test_empty_fields_and_policy(self): self.request = AWSRequest() - self.request.url = 'https://s3.amazonaws.com/%s' % self.bucket + self.request.url = f'https://s3.amazonaws.com/{self.bucket}' self.request.method = 'POST' self.auth.add_auth(self.request) diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py index 86c37ceca9..e09220f11e 100644 --- a/tests/unit/auth/test_sigv4.py +++ b/tests/unit/auth/test_sigv4.py @@ -22,6 +22,7 @@ generate testcases based on these files. 
""" + import datetime import io import logging @@ -164,9 +165,9 @@ def _test_signature_version_4(test_case): def assert_equal(actual, expected, raw_request, part): if actual != expected: - message = "The %s did not match" % part + message = f"The {part} did not match" message += f"\nACTUAL:{actual!r} !=\nEXPECT:{expected!r}" - message += '\nThe raw request was:\n%s' % raw_request + message += f'\nThe raw request was:\n{raw_request}' raise AssertionError(message) diff --git a/tests/unit/docs/test_method.py b/tests/unit/docs/test_method.py index a23fb98817..0c08d11738 100644 --- a/tests/unit/docs/test_method.py +++ b/tests/unit/docs/test_method.py @@ -352,9 +352,9 @@ def test_exclude_output(self): def test_streaming_body_in_output(self): self.add_shape_to_params('Body', 'Blob') self.json_model['shapes']['Blob'] = {'type': 'blob'} - self.json_model['shapes']['SampleOperationInputOutput'][ - 'payload' - ] = 'Body' + self.json_model['shapes']['SampleOperationInputOutput']['payload'] = ( + 'Body' + ) document_model_driven_method( self.doc_structure, 'foo', @@ -367,9 +367,9 @@ def test_streaming_body_in_output(self): def test_event_stream_body_in_output(self): self.add_shape_to_params('Payload', 'EventStream') - self.json_model['shapes']['SampleOperationInputOutput'][ - 'payload' - ] = 'Payload' + self.json_model['shapes']['SampleOperationInputOutput']['payload'] = ( + 'Payload' + ) self.json_model['shapes']['EventStream'] = { 'type': 'structure', 'eventstream': True, @@ -416,9 +416,9 @@ def test_streaming_body_in_input(self): del self.json_model['operations']['SampleOperation']['output'] self.add_shape_to_params('Body', 'Blob') self.json_model['shapes']['Blob'] = {'type': 'blob'} - self.json_model['shapes']['SampleOperationInputOutput'][ - 'payload' - ] = 'Body' + self.json_model['shapes']['SampleOperationInputOutput']['payload'] = ( + 'Body' + ) document_model_driven_method( self.doc_structure, 'foo', diff --git a/tests/unit/docs/test_utils.py 
b/tests/unit/docs/test_utils.py index 25fb6aa46a..fd8cdb37aa 100644 --- a/tests/unit/docs/test_utils.py +++ b/tests/unit/docs/test_utils.py @@ -196,7 +196,7 @@ def setUp(self): def test_hides_params_from_doc_string(self): section = self.doc_structure.add_new_section(self.name) - param_signature = ':param %s: ' % self.name + param_signature = f':param {self.name}: ' section.write(param_signature) self.assert_contains_line(param_signature) self.param.hide_param( @@ -208,7 +208,7 @@ def test_hides_params_from_doc_string(self): def test_hides_param_from_example(self): structure = self.doc_structure.add_new_section('structure-value') section = structure.add_new_section(self.name) - example = '%s: \'string\'' % self.name + example = f'{self.name}: \'string\'' section.write(example) self.assert_contains_line(example) self.param.hide_param( diff --git a/tests/unit/response_parsing/test_response_parsing.py b/tests/unit/response_parsing/test_response_parsing.py index f5ecefd9a9..3e3b9838cf 100644 --- a/tests/unit/response_parsing/test_response_parsing.py +++ b/tests/unit/response_parsing/test_response_parsing.py @@ -70,7 +70,7 @@ def _test_parsed_response(xmlfile, operation_model, expected): response = {'body': response_body, 'status_code': 200, 'headers': {}} for case in SPECIAL_CASES: if case in xmlfile: - print("SKIP: %s" % xmlfile) + print(f"SKIP: {xmlfile}") return if 'errors' in xmlfile: response['status_code'] = 400 @@ -109,7 +109,7 @@ def _test_parsed_response(xmlfile, operation_model, expected): pretty_d1 = pprint.pformat(d1, width=1).splitlines() pretty_d2 = pprint.pformat(d2, width=1).splitlines() diff = '\n' + '\n'.join(difflib.ndiff(pretty_d1, pretty_d2)) - raise AssertionError("Dicts are not equal:\n%s" % diff) + raise AssertionError(f"Dicts are not equal:\n{diff}") def _convert_bytes_to_str(parsed): @@ -135,7 +135,7 @@ def _xml_test_cases(): for dp in ['responses', 'errors']: data_path = os.path.join(os.path.dirname(__file__), 'xml') data_path = 
os.path.join(data_path, dp) - xml_files = glob.glob('%s/*.xml' % data_path) + xml_files = glob.glob(f'{data_path}/*.xml') service_names = set() for fn in xml_files: service_names.add(os.path.split(fn)[1].split('-')[0]) diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py index 6bfd8e3d95..55dcb7ae58 100644 --- a/tests/unit/retries/test_standard.py +++ b/tests/unit/retries/test_standard.py @@ -1,3 +1,5 @@ +from collections import Counter + import pytest from botocore import model @@ -466,6 +468,33 @@ def test_exponential_backoff_with_jitter(self): for x in backoffs: self.assertTrue(0 <= x <= 4) + def test_uniform_rand_dist_on_max_attempts(self): + backoff = standard.ExponentialBackoff() + num_datapoints = 10_000 + backoffs = [ + backoff.delay_amount(standard.RetryContext(attempt_number=10)) + for i in range(num_datapoints) + ] + self._assert_looks_like_uniform_distribution(backoffs) + + def _assert_looks_like_uniform_distribution(self, backoffs): + histogram = Counter(int(el) for el in backoffs) + expected_value = len(backoffs) / len(histogram) + # This is an arbitrarily chosen tolerance, but we're being fairly + # lenient here and giving a 20% tolerance. We're only interested + # in cases where it's obviously broken and not a uniform distribution. 
+ tolerance = 0.20 + low = expected_value - (expected_value * tolerance) + high = expected_value + (expected_value * tolerance) + out_of_range = [ + str(i) for i in histogram.values() if not low <= i <= high + ] + if out_of_range: + raise AssertionError( + "Backoff values outside of uniform distribution range " + f"({low} - {high}): {', '.join(out_of_range)}" + ) + class TestRetryQuotaChecker(unittest.TestCase): def setUp(self): diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py index 8f1c992422..9338b05371 100644 --- a/tests/unit/test_args.py +++ b/tests/unit/test_args.py @@ -186,7 +186,7 @@ def test_s3_with_endpoint_url_still_resolves_region(self): { 'region_name': 'us-west-2', 'signature_version': 's3v4', - 'enpoint_url': 'https://s3-us-west-2.amazonaws.com', + 'endpoint_url': 'https://s3-us-west-2.amazonaws.com', 'signing_name': 's3', 'signing_region': 'us-west-2', 'metadata': {}, diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 962dab01a7..86974fd553 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -179,9 +179,9 @@ def create_client_creator( if retry_config_translator is None: retry_config_translator = botocore.translate if endpoint_prefix is not None: - self.service_description['metadata'][ - 'endpointPrefix' - ] = endpoint_prefix + self.service_description['metadata']['endpointPrefix'] = ( + endpoint_prefix + ) if endpoint_creator is not None: self.endpoint_creator_cls.return_value = endpoint_creator @@ -232,9 +232,9 @@ def test_client_name(self): self.assertTrue(service_client.__class__.__name__, 'MyService') def test_client_name_with_amazon(self): - self.service_description['metadata'][ - 'serviceFullName' - ] = 'Amazon MyService' + self.service_description['metadata']['serviceFullName'] = ( + 'Amazon MyService' + ) creator = self.create_client_creator() service_client = creator.create_client( 'myservice', 'us-west-2', credentials=self.credentials @@ -242,9 +242,9 @@ def 
test_client_name_with_amazon(self): self.assertTrue(service_client.__class__.__name__, 'MyService') def test_client_name_using_abreviation(self): - self.service_description['metadata'][ - 'serviceAbbreviation' - ] = 'Abbreviation' + self.service_description['metadata']['serviceAbbreviation'] = ( + 'Abbreviation' + ) creator = self.create_client_creator() service_client = creator.create_client( 'myservice', 'us-west-2', credentials=self.credentials @@ -252,9 +252,9 @@ def test_client_name_using_abreviation(self): self.assertTrue(service_client.__class__.__name__, 'Abbreviation') def test_client_name_with_non_alphabet_characters(self): - self.service_description['metadata'][ - 'serviceFullName' - ] = 'Amazon My-Service' + self.service_description['metadata']['serviceFullName'] = ( + 'Amazon My-Service' + ) creator = self.create_client_creator() service_client = creator.create_client( 'myservice', 'us-west-2', credentials=self.credentials @@ -1284,7 +1284,10 @@ def test_event_emitted_when_invoked(self): creator = self.create_client_creator(event_emitter=event_emitter) calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + event_emitter.register('before-call', handler) service_client = creator.create_client( @@ -1298,10 +1301,14 @@ def test_events_are_per_client(self): creator = self.create_client_creator(event_emitter=event_emitter) first_calls = [] - first_handler = lambda **kwargs: first_calls.append(kwargs) + + def first_handler(**kwargs): + return first_calls.append(kwargs) second_calls = [] - second_handler = lambda **kwargs: second_calls.append(kwargs) + + def second_handler(**kwargs): + return second_calls.append(kwargs) first_client = creator.create_client( 'myservice', 'us-west-2', credentials=self.credentials @@ -1339,7 +1346,10 @@ def test_clients_inherit_handlers_from_session(self): # So if an event handler is registered before any clients are created: base_calls = [] - base_handler = lambda 
**kwargs: base_calls.append(kwargs) + + def base_handler(**kwargs): + return base_calls.append(kwargs) + event_emitter.register('before-call', base_handler) # Then any client created from this point forward from the @@ -1371,7 +1381,10 @@ def test_clients_inherit_only_at_create_time(self): # 2. Now register an event handler from the originating event emitter. base_calls = [] - base_handler = lambda **kwargs: base_calls.append(kwargs) + + def base_handler(**kwargs): + return base_calls.append(kwargs) + event_emitter.register('before-call', base_handler) # 3. The client will _not_ see this because it already has its diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py index c0480c856c..3017444a6f 100644 --- a/tests/unit/test_credentials.py +++ b/tests/unit/test_credentials.py @@ -2111,9 +2111,9 @@ def test_cache_key_is_windows_safe(self): }, } cache = {} - self.fake_config['profiles']['development'][ - 'role_arn' - ] = 'arn:aws:iam::foo-role' + self.fake_config['profiles']['development']['role_arn'] = ( + 'arn:aws:iam::foo-role' + ) client_creator = self.create_client_creator(with_response=response) provider = credentials.AssumeRoleProvider( @@ -2140,12 +2140,12 @@ def test_cache_key_with_role_session_name(self): }, } cache = {} - self.fake_config['profiles']['development'][ - 'role_arn' - ] = 'arn:aws:iam::foo-role' - self.fake_config['profiles']['development'][ - 'role_session_name' - ] = 'foo_role_session_name' + self.fake_config['profiles']['development']['role_arn'] = ( + 'arn:aws:iam::foo-role' + ) + self.fake_config['profiles']['development']['role_session_name'] = ( + 'foo_role_session_name' + ) client_creator = self.create_client_creator(with_response=response) provider = credentials.AssumeRoleProvider( @@ -2277,9 +2277,9 @@ def test_assume_role_with_duration(self): ) def test_assume_role_with_bad_duration(self): - self.fake_config['profiles']['development'][ - 'duration_seconds' - ] = 'garbage value' + 
self.fake_config['profiles']['development']['duration_seconds'] = ( + 'garbage value' + ) response = { 'Credentials': { 'AccessKeyId': 'foo', @@ -2780,9 +2780,9 @@ def __init__(self, profile_name): def load(self): return Credentials( - '%s-access-key' % self._profile_name, - '%s-secret-key' % self._profile_name, - '%s-token' % self._profile_name, + f'{self._profile_name}-access-key', + f'{self._profile_name}-secret-key', + f'{self._profile_name}-token', self.METHOD, ) diff --git a/tests/unit/test_endpoint_provider.py b/tests/unit/test_endpoint_provider.py index 51a07079bc..c1f82ace2e 100644 --- a/tests/unit/test_endpoint_provider.py +++ b/tests/unit/test_endpoint_provider.py @@ -249,7 +249,6 @@ def test_invalid_arn_returns_none(rule_lib): "service": "s3", "region": "", "accountId": "", - "region": "", "resourceId": ["myBucket"], }, ), diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py index c83811b3bb..a683e37e7f 100644 --- a/tests/unit/test_eventstream.py +++ b/tests/unit/test_eventstream.py @@ -10,7 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -"""Unit tests for the binary event stream decoder. 
""" +"""Unit tests for the binary event stream decoder.""" + import pytest from botocore.eventstream import ( @@ -214,7 +215,7 @@ CORRUPTED_HEADER_LENGTH = ( ( - b"\x00\x00\x00=\xFF\x00\x01\x02\x07\xfd\x83\x96\x0ccontent-type\x07\x00" + b"\x00\x00\x00=\xff\x00\x01\x02\x07\xfd\x83\x96\x0ccontent-type\x07\x00" b"\x10application/json{'foo':'bar'}\x8d\x9c\x08\xb1" ), InvalidHeadersLength, @@ -375,37 +376,37 @@ def test_message_to_response_dict_error(): def test_unpack_uint8(): - (value, bytes_consumed) = DecodeUtils.unpack_uint8(b'\xDE') + (value, bytes_consumed) = DecodeUtils.unpack_uint8(b'\xde') assert bytes_consumed == 1 assert value == 0xDE def test_unpack_uint32(): - (value, bytes_consumed) = DecodeUtils.unpack_uint32(b'\xDE\xAD\xBE\xEF') + (value, bytes_consumed) = DecodeUtils.unpack_uint32(b'\xde\xad\xbe\xef') assert bytes_consumed == 4 assert value == 0xDEADBEEF def test_unpack_int8(): - (value, bytes_consumed) = DecodeUtils.unpack_int8(b'\xFE') + (value, bytes_consumed) = DecodeUtils.unpack_int8(b'\xfe') assert bytes_consumed == 1 assert value == -2 def test_unpack_int16(): - (value, bytes_consumed) = DecodeUtils.unpack_int16(b'\xFF\xFE') + (value, bytes_consumed) = DecodeUtils.unpack_int16(b'\xff\xfe') assert bytes_consumed == 2 assert value == -2 def test_unpack_int32(): - (value, bytes_consumed) = DecodeUtils.unpack_int32(b'\xFF\xFF\xFF\xFE') + (value, bytes_consumed) = DecodeUtils.unpack_int32(b'\xff\xff\xff\xfe') assert bytes_consumed == 4 assert value == -2 def test_unpack_int64(): - test_bytes = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE' + test_bytes = b'\xff\xff\xff\xff\xff\xff\xff\xfe' (value, bytes_consumed) = DecodeUtils.unpack_int64(test_bytes) assert bytes_consumed == 8 assert value == -2 diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py index ad0223dac9..4d1c84c802 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -48,8 +48,7 @@ def test_retry_info_added_when_present(): error_msg = 
str(exceptions.ClientError(response, 'operation')) if '(reached max retries: 3)' not in error_msg: raise AssertionError( - "retry information not inject into error " - "message: %s" % error_msg + "retry information not inject into error " f"message: {error_msg}" ) @@ -67,7 +66,7 @@ def test_retry_info_not_added_if_retry_attempts_not_present(): raise AssertionError( "Retry information should not be in exception " "message when retry attempts not in response " - "metadata: %s" % error_msg + f"metadata: {error_msg}" ) @@ -84,7 +83,7 @@ def test_can_handle_when_response_missing_error_key(): if 'An error occurred (Unknown)' not in str(e): raise AssertionError( "Error code should default to 'Unknown' " - "when missing error response, instead got: %s" % str(e) + f"when missing error response, instead got: {str(e)}" ) diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index 2701c32efe..fef4c43eab 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -767,7 +767,7 @@ def test_switch_host_with_param(self): request = AWSRequest() url = 'https://machinelearning.us-east-1.amazonaws.com' new_endpoint = 'https://my-custom-endpoint.amazonaws.com' - data = '{"PredictEndpoint":"%s"}' % new_endpoint + data = f'{{"PredictEndpoint":"{new_endpoint}"}}' request.data = data.encode('utf-8') request.url = url handlers.switch_host_with_param(request, 'PredictEndpoint') @@ -822,7 +822,7 @@ def test_validation_is_s3_accesspoint_arn(self): arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint:endpoint' handlers.validate_bucket_name({'Bucket': arn}) except ParamValidationError: - self.fail('The s3 arn: %s should pass validation' % arn) + self.fail(f'The s3 arn: {arn} should pass validation') def test_validation_is_s3_outpost_arn(self): try: @@ -832,7 +832,7 @@ def test_validation_is_s3_outpost_arn(self): ) handlers.validate_bucket_name({'Bucket': arn}) except ParamValidationError: - self.fail('The s3 arn: %s should pass validation' % arn) + 
self.fail(f'The s3 arn: {arn} should pass validation') def test_validation_is_global_s3_bucket_arn(self): with self.assertRaises(ParamValidationError): @@ -1036,13 +1036,11 @@ def test_set_operation_specific_signer_v4a_existing_signing_context(self): signing_name = 'myservice' context = { 'auth_type': 'v4a', - 'signing': {'foo': 'bar', 'region': 'abc'}, + 'signing': {'foo': 'bar'}, } handlers.set_operation_specific_signer( context=context, signing_name=signing_name ) - # region has been updated - self.assertEqual(context['signing']['region'], '*') # signing_name has been added self.assertEqual(context['signing']['signing_name'], signing_name) # foo remained untouched @@ -1073,6 +1071,61 @@ def test_set_operation_specific_signer_s3v4_unsigned_payload(self): self.assertEqual(response, 's3v4') self.assertEqual(context.get('payload_signing_enabled'), False) + def test_set_operation_specific_signer_defaults_to_asterisk(self): + signing_name = 'myservice' + context = { + 'auth_type': 'v4a', + } + handlers.set_operation_specific_signer( + context=context, signing_name=signing_name + ) + self.assertEqual(context['signing']['region'], '*') + + def test_set_operation_specific_signer_prefers_client_config(self): + signing_name = 'myservice' + context = { + 'auth_type': 'v4a', + 'client_config': Config( + sigv4a_signing_region_set="region_1,region_2" + ), + 'signing': { + 'region': 'abc', + }, + } + handlers.set_operation_specific_signer( + context=context, signing_name=signing_name + ) + self.assertEqual(context['signing']['region'], 'region_1,region_2') + + def test_payload_signing_disabled_sets_proper_key(self): + signing_name = 'myservice' + context = { + 'auth_type': 'v4', + 'signing': { + 'foo': 'bar', + 'region': 'abc', + }, + 'unsigned_payload': True, + } + handlers.set_operation_specific_signer( + context=context, signing_name=signing_name + ) + self.assertEqual(context.get('payload_signing_enabled'), False) + + def 
test_no_payload_signing_disabled_does_not_set_key(self): + signing_name = 'myservice' + context = { + 'auth_type': 'v4', + 'signing': { + 'foo': 'bar', + 'region': 'abc', + }, + } + handlers.set_operation_specific_signer( + context=context, signing_name=signing_name + ) + self.assertNotIn('payload_signing_enabled', context) + @pytest.mark.parametrize( 'auth_type, expected_response', [('v4', 's3v4'), ('v4a', 's3v4a')] @@ -1213,7 +1266,7 @@ def test_sse_params(self): 'UploadPartCopy', 'SelectObjectContent', ): - event = 'before-parameter-build.s3.%s' % op + event = f'before-parameter-build.s3.{op}' params = { 'SSECustomerKey': b'bar', 'SSECustomerAlgorithm': 'AES256', @@ -1235,7 +1288,7 @@ def test_sse_params_as_str(self): def test_copy_source_sse_params(self): for op in ['CopyObject', 'UploadPartCopy']: - event = 'before-parameter-build.s3.%s' % op + event = f'before-parameter-build.s3.{op}' params = { 'CopySourceSSECustomerKey': b'bar', 'CopySourceSSECustomerAlgorithm': 'AES256', @@ -1619,10 +1672,10 @@ def test_does_validate_host_with_illegal_char(self): ( { 'AWS_LAMBDA_FUNCTION_NAME': 'foo', - '_X_AMZN_TRACE_ID': 'test123-=;:+&[]{}\"\'', + '_X_AMZN_TRACE_ID': 'test123-=;:+&[]{}"\'', }, {}, - {'X-Amzn-Trace-Id': 'test123-=;:+&[]{}\"\''}, + {'X-Amzn-Trace-Id': 'test123-=;:+&[]{}"\''}, ), ], ) diff --git a/tests/unit/test_hooks.py b/tests/unit/test_hooks.py index ea46ae33f5..5b085e5b22 100644 --- a/tests/unit/test_hooks.py +++ b/tests/unit/test_hooks.py @@ -262,7 +262,7 @@ def assert_hook_is_called_given_event(self, event): self.emitter.emit(event) after = len(self.hook_calls) if not after > starting: - self.fail("Handler was not called for event: %s" % event) + self.fail(f"Handler was not called for event: {event}") self.assertEqual(self.hook_calls[-1]['event_name'], event) def assert_hook_is_not_called_given_event(self, event): @@ -272,8 +272,7 @@ def assert_hook_is_not_called_given_event(self, event): if not after == starting: self.fail( "Handler was called for 
event but was not " - "suppose to be called: %s, last_event: %s" - % (event, self.hook_calls[-1]) + f"suppose to be called: {event}, last_event: {self.hook_calls[-1]}" ) def test_one_level_wildcard_handler(self): @@ -399,7 +398,9 @@ def test_register_with_unique_id(self): self.assertEqual(len(self.hook_calls), 0) def test_remove_handler_with_unique_id(self): - hook2 = lambda **kwargs: self.hook_calls.append(kwargs) + def hook2(**kwargs): + return self.hook_calls.append(kwargs) + self.emitter.register('foo.bar.baz', self.hook, unique_id='foo') self.emitter.register('foo.bar.baz', hook2) self.emitter.emit('foo.bar.baz') diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py index 9e059ad5a8..8fb081e1ab 100644 --- a/tests/unit/test_http_session.py +++ b/tests/unit/test_http_session.py @@ -505,7 +505,7 @@ def test_close_proxied(self): proxies = {'https': 'http://proxy.com', 'http': 'http://proxy2.com'} session = URLLib3Session(proxies=proxies) for proxy, proxy_url in proxies.items(): - self.request.url = '%s://example.com/' % proxy + self.request.url = f'{proxy}://example.com/' session.send(self.request.prepare()) session.close() diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py index 303ed1a31a..da95a18bea 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -182,6 +182,7 @@ def setUp(self): 'errors': [{'shape': 'NoSuchResourceException'}], 'documentation': 'Docs for OperationName', 'authtype': 'v4', + 'auth': ['aws.auth#sigv4'], }, 'OperationTwo': { 'http': { @@ -396,6 +397,22 @@ def test_error_shapes(self): operation.error_shapes[0].name, 'NoSuchResourceException' ) + def test_has_auth(self): + operation = self.service_model.operation_model('OperationName') + self.assertEqual(operation.auth, ["aws.auth#sigv4"]) + + def test_auth_not_set(self): + operation = self.service_model.operation_model('OperationTwo') + self.assertIsNone(operation.auth) + + def test_has_resolved_auth_type(self): + operation = 
self.service_model.operation_model('OperationName') + self.assertEqual(operation.resolved_auth_type, 'v4') + + def test_resolved_auth_type_not_set(self): + operation = self.service_model.operation_model('OperationTwo') + self.assertIsNone(operation.resolved_auth_type) + def test_has_auth_type(self): operation = self.service_model.operation_model('OperationName') self.assertEqual(operation.auth_type, 'v4') diff --git a/tests/unit/test_protocols.py b/tests/unit/test_protocols.py index a2ff077faa..253ec5f7a5 100644 --- a/tests/unit/test_protocols.py +++ b/tests/unit/test_protocols.py @@ -50,6 +50,7 @@ BOTOCORE_TEST_ID=5:1 pytest test/unit/test_protocols.py """ + import copy import os from base64 import b64decode @@ -136,7 +137,7 @@ def test_input_compliance(json_description, case, basename): try: protocol_serializer = PROTOCOL_SERIALIZERS[protocol_type] except KeyError: - raise RuntimeError("Unknown protocol: %s" % protocol_type) + raise RuntimeError(f"Unknown protocol: {protocol_type}") serializer = protocol_serializer() serializer.MAP_TYPE = OrderedDict operation_model = OperationModel(case['given'], model) @@ -155,7 +156,7 @@ def _assert_request_body_is_bytes(body): if not isinstance(body, bytes): raise AssertionError( "Expected body to be serialized as type " - "bytes(), instead got: %s" % type(body) + f"bytes(), instead got: {type(body)}" ) @@ -217,10 +218,9 @@ def test_output_compliance(json_description, case, basename): parsed = _fixup_parsed_result(parsed) except Exception as e: msg = ( - "\nFailed to run test : %s\n" - "Protocol : %s\n" - "Description : %s (%s:%s)\n" - % ( + "\nFailed to run test : {}\n" + "Protocol : {}\n" + "Description : {} ({}:{})\n".format( e, model.metadata['protocol'], case['description'], @@ -312,14 +312,13 @@ def _output_failure_message( ): j = _try_json_dump error_message = ( - "\nDescription : %s (%s:%s)\n" - "Protocol: : %s\n" - "Given : %s\n" - "Response : %s\n" - "Expected serialization: %s\n" - "Actual serialization : %s\n" - 
"Assertion message : %s\n" - % ( + "\nDescription : {} ({}:{})\n" + "Protocol: : {}\n" + "Given : {}\n" + "Response : {}\n" + "Expected serialization: {}\n" + "Actual serialization : {}\n" + "Assertion message : {}\n".format( case['description'], case['suite_id'], case['test_id'], @@ -337,14 +336,13 @@ def _output_failure_message( def _input_failure_message(protocol_type, case, actual_request, error): j = _try_json_dump error_message = ( - "\nDescription : %s (%s:%s)\n" - "Protocol: : %s\n" - "Given : %s\n" - "Params : %s\n" - "Expected serialization: %s\n" - "Actual serialization : %s\n" - "Assertion message : %s\n" - % ( + "\nDescription : {} ({}:{})\n" + "Protocol: : {}\n" + "Given : {}\n" + "Params : {}\n" + "Expected serialization: {}\n" + "Actual serialization : {}\n" + "Assertion message : {}\n".format( case['description'], case['suite_id'], case['test_id'], @@ -373,15 +371,9 @@ def assert_equal(first, second, prefix): assert first == second except Exception: try: - better = "{} (actual != expected)\n{} !=\n{}".format( - prefix, - json.dumps(first, indent=2), - json.dumps(second, indent=2), - ) + better = f"{prefix} (actual != expected)\n{json.dumps(first, indent=2)} !=\n{json.dumps(second, indent=2)}" except (ValueError, TypeError): - better = "{} (actual != expected)\n{} !=\n{}".format( - prefix, first, second - ) + better = f"{prefix} (actual != expected)\n{first} !=\n{second}" raise AssertionError(better) @@ -397,9 +389,9 @@ def _serialize_request_description(request_dict): # test runner we need to handle the case where the url_path # already has query params. if '?' 
not in request_dict['url_path']: - request_dict['url_path'] += '?%s' % encoded + request_dict['url_path'] += f'?{encoded}' else: - request_dict['url_path'] += '&%s' % encoded + request_dict['url_path'] += f'&{encoded}' def _assert_requests_equal(actual, expected): diff --git a/tests/unit/test_serialize.py b/tests/unit/test_serialize.py index ca57d01af2..639523fd59 100644 --- a/tests/unit/test_serialize.py +++ b/tests/unit/test_serialize.py @@ -11,6 +11,7 @@ may result in a a coverage gap that would otherwise be untested. """ + import base64 import datetime import io @@ -430,7 +431,7 @@ def test_instantiate_without_validation(self): except ParamValidationError as e: self.fail( "Shouldn't fail serializing valid parameter without " - "validation: {}".format(e) + f"validation: {e}" ) try: @@ -438,7 +439,7 @@ def test_instantiate_without_validation(self): except ParamValidationError as e: self.fail( "Shouldn't fail serializing invalid parameter without " - "validation: {}".format(e) + f"validation: {e}" ) def test_instantiate_with_validation(self): @@ -450,7 +451,7 @@ def test_instantiate_with_validation(self): except ParamValidationError as e: self.fail( "Shouldn't fail serializing invalid parameter without " - "validation: {}".format(e) + f"validation: {e}" ) with self.assertRaises(ParamValidationError): diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 597cd15dc6..d3eabbdf08 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -245,7 +245,10 @@ def test_path_not_in_available_profiles(self): def test_emit_delegates_to_emitter(self): calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + self.session.register('foo', handler) self.session.emit('foo') self.assertEqual(len(calls), 1) @@ -255,7 +258,10 @@ def test_emitter_can_be_passed_in(self): events = HierarchicalEmitter() session = create_session(event_hooks=events) calls = [] - handler = lambda **kwargs: 
calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + events.register('foo', handler) session.emit('foo') @@ -289,7 +295,10 @@ def test_general_purpose_logger(self, formatter, file_handler, get_logger): def test_register_with_unique_id(self): calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + self.session.register('foo', handler, unique_id='bar') self.session.emit('foo') self.assertEqual(calls[0]['event_name'], 'foo') @@ -711,7 +720,7 @@ def test_create_client_uses_api_version_from_config(self, client_creator): f.write('[default]\n') f.write( 'foo_api_versions =\n' - ' myservice = %s\n' % config_api_version + f' myservice = {config_api_version}\n' ) f.flush() @@ -760,7 +769,7 @@ def test_param_api_version_overrides_config_value(self, client_creator): f.write('[default]\n') f.write( 'foo_api_versions =\n' - ' myservice = %s\n' % config_api_version + f' myservice = {config_api_version}\n' ) f.flush() @@ -901,14 +910,20 @@ def test_last_registration_wins(self): def test_can_lazy_register_a_component(self): component = object() - lazy = lambda: component + + def lazy(): + return component + self.components.lazy_register_component('foo', lazy) self.assertIs(self.components.get_component('foo'), component) def test_latest_registration_wins_even_if_lazy(self): first = object() second = object() - lazy_second = lambda: second + + def lazy_second(): + return second + self.components.register_component('foo', first) self.components.lazy_register_component('foo', lazy_second) self.assertIs(self.components.get_component('foo'), second) @@ -916,7 +931,10 @@ def test_latest_registration_wins_even_if_lazy(self): def test_latest_registration_overrides_lazy(self): first = object() second = object() - lazy_first = lambda: first + + def lazy_first(): + return first + self.components.lazy_register_component('foo', lazy_first) self.components.register_component('foo', second) 
self.assertIs(self.components.get_component('foo'), second) @@ -996,6 +1014,8 @@ def init_hook(session): self.assertEqual(call_args, []) def test_unregister_hook_raises_value_error(self): - not_registered = lambda session: None + def not_registered(session): + return None + with self.assertRaises(ValueError): self.assertRaises(unregister_initializer(not_registered)) diff --git a/tests/unit/test_session_legacy.py b/tests/unit/test_session_legacy.py index 5388ffa2d4..4ace6c8905 100644 --- a/tests/unit/test_session_legacy.py +++ b/tests/unit/test_session_legacy.py @@ -236,7 +236,10 @@ def test_path_not_in_available_profiles(self): def test_emit_delegates_to_emitter(self): calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + self.session.register('foo', handler) self.session.emit('foo') self.assertEqual(len(calls), 1) @@ -248,7 +251,10 @@ def test_emitter_can_be_passed_in(self): session_vars=self.env_vars, event_hooks=events ) calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + events.register('foo', handler) session.emit('foo') @@ -283,7 +289,10 @@ def test_general_purpose_logger(self, formatter, file_handler, get_logger): def test_register_with_unique_id(self): calls = [] - handler = lambda **kwargs: calls.append(kwargs) + + def handler(**kwargs): + return calls.append(kwargs) + self.session.register('foo', handler, unique_id='bar') self.session.emit('foo') self.assertEqual(calls[0]['event_name'], 'foo') @@ -723,7 +732,7 @@ def test_create_client_uses_api_version_from_config(self, client_creator): f.write('[default]\n') f.write( 'foo_api_versions =\n' - ' myservice = %s\n' % config_api_version + f' myservice = {config_api_version}\n' ) f.flush() @@ -774,7 +783,7 @@ def test_param_api_version_overrides_config_value(self, client_creator): f.write('[default]\n') f.write( 'foo_api_versions =\n' - ' myservice = %s\n' % config_api_version + 
f' myservice = {config_api_version}\n' ) f.flush() @@ -842,14 +851,20 @@ def test_last_registration_wins(self): def test_can_lazy_register_a_component(self): component = object() - lazy = lambda: component + + def lazy(): + return component + self.components.lazy_register_component('foo', lazy) self.assertIs(self.components.get_component('foo'), component) def test_latest_registration_wins_even_if_lazy(self): first = object() second = object() - lazy_second = lambda: second + + def lazy_second(): + return second + self.components.register_component('foo', first) self.components.lazy_register_component('foo', lazy_second) self.assertIs(self.components.get_component('foo'), second) @@ -857,7 +872,10 @@ def test_latest_registration_wins_even_if_lazy(self): def test_latest_registration_overrides_lazy(self): first = object() second = object() - lazy_first = lambda: first + + def lazy_first(): + return first + self.components.lazy_register_component('foo', lazy_first) self.components.register_component('foo', second) self.assertIs(self.components.get_component('foo'), second) diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py index 92a3d5da3f..d538934593 100644 --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -996,7 +996,7 @@ def test_generate_presign_url_emits_is_presign_in_context(self): self.assertTrue( kwargs.get('context', {}).get('is_presign_request'), 'The context did not have is_presign_request set to True for ' - 'the following kwargs emitted: %s' % kwargs, + f'the following kwargs emitted: {kwargs}', ) def test_context_param_from_event_handler_sent_to_endpoint_resolver(self): diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 32c9f336c3..240dd6b535 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1308,25 +1308,25 @@ def test_default(self): ) def test_client_name_with_amazon(self): - self.service_description['metadata'][ - 'serviceFullName' - ] = 'Amazon MyService' + 
self.service_description['metadata']['serviceFullName'] = ( + 'Amazon MyService' + ) self.assertEqual( get_service_module_name(self.service_model), 'MyService' ) def test_client_name_using_abreviation(self): - self.service_description['metadata'][ - 'serviceAbbreviation' - ] = 'Abbreviation' + self.service_description['metadata']['serviceAbbreviation'] = ( + 'Abbreviation' + ) self.assertEqual( get_service_module_name(self.service_model), 'Abbreviation' ) def test_client_name_with_non_alphabet_characters(self): - self.service_description['metadata'][ - 'serviceFullName' - ] = 'Amazon My-Service' + self.service_description['metadata']['serviceFullName'] = ( + 'Amazon My-Service' + ) self.assertEqual( get_service_module_name(self.service_model), 'MyService' ) @@ -2199,9 +2199,9 @@ def get_s3_request( if bucket: url += bucket if key: - url += '/%s' % key + url += f'/{key}' if querystring: - url += '?%s' % querystring + url += f'?{querystring}' return AWSRequest(method='GET', headers={}, url=url) def get_s3_outpost_request(self, **s3_request_kwargs): @@ -2268,65 +2268,60 @@ def test_register(self): def test_outpost_endpoint(self): request = self.get_s3_outpost_request() self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = 'https://{}-{}.{}.s3-outposts.{}.amazonaws.com/'.format( - self.accesspoint_name, - self.account, - self.outpost_name, - self.region_name, + outpost_prefix = ( + f'{self.accesspoint_name}-{self.account}.{self.outpost_name}' ) + dns_suffix = f'{self.region_name}.amazonaws.com' + expected_url = f'https://{outpost_prefix}.s3-outposts.{dns_suffix}/' self.assertEqual(request.url, expected_url) def test_outpost_endpoint_preserves_key_in_path(self): request = self.get_s3_outpost_request(key=self.key) self.call_set_endpoint(self.endpoint_setter, request=request) + outpost_prefix = ( + f'{self.accesspoint_name}-{self.account}.{self.outpost_name}' + ) + dns_suffix = f'{self.region_name}.amazonaws.com' expected_url = ( - 
'https://{}-{}.{}.s3-outposts.{}.amazonaws.com/{}'.format( - self.accesspoint_name, - self.account, - self.outpost_name, - self.region_name, - self.key, - ) + f'https://{outpost_prefix}.s3-outposts.{dns_suffix}/{self.key}' ) self.assertEqual(request.url, expected_url) def test_accesspoint_endpoint(self): request = self.get_s3_accesspoint_request() self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = 'https://{}-{}.s3-accesspoint.{}.amazonaws.com/'.format( - self.accesspoint_name, self.account, self.region_name + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{self.region_name}.amazonaws.com' + expected_url = ( + f'https://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/' ) self.assertEqual(request.url, expected_url) def test_accesspoint_preserves_key_in_path(self): request = self.get_s3_accesspoint_request(key=self.key) self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = ( - 'https://{}-{}.s3-accesspoint.{}.amazonaws.com/{}'.format( - self.accesspoint_name, self.account, self.region_name, self.key - ) - ) + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{self.region_name}.amazonaws.com' + expected_url = f'https://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/{self.key}' self.assertEqual(request.url, expected_url) def test_accesspoint_preserves_scheme(self): request = self.get_s3_accesspoint_request(scheme='http://') self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = 'http://{}-{}.s3-accesspoint.{}.amazonaws.com/'.format( - self.accesspoint_name, - self.account, - self.region_name, + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{self.region_name}.amazonaws.com' + expected_url = ( + f'http://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/' ) self.assertEqual(request.url, expected_url) def test_accesspoint_preserves_query_string(self): request = 
self.get_s3_accesspoint_request(querystring='acl') self.call_set_endpoint(self.endpoint_setter, request=request) + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{self.region_name}.amazonaws.com' expected_url = ( - 'https://{}-{}.s3-accesspoint.{}.amazonaws.com/?acl'.format( - self.accesspoint_name, - self.account, - self.region_name, - ) + f'https://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/?acl' ) self.assertEqual(request.url, expected_url) @@ -2336,10 +2331,10 @@ def test_uses_resolved_dns_suffix(self): } request = self.get_s3_accesspoint_request() self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = 'https://{}-{}.s3-accesspoint.{}.mysuffix.com/'.format( - self.accesspoint_name, - self.account, - self.region_name, + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{self.region_name}.mysuffix.com' + expected_url = ( + f'https://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/' ) self.assertEqual(request.url, expected_url) @@ -2350,10 +2345,10 @@ def test_uses_region_of_client_if_use_arn_disabled(self): ) request = self.get_s3_accesspoint_request() self.call_set_endpoint(self.endpoint_setter, request=request) - expected_url = 'https://{}-{}.s3-accesspoint.{}.amazonaws.com/'.format( - self.accesspoint_name, - self.account, - client_region, + accesspoint_prefix = f'{self.accesspoint_name}-{self.account}' + dns_suffix = f'{client_region}.amazonaws.com' + expected_url = ( + f'https://{accesspoint_prefix}.s3-accesspoint.{dns_suffix}/' ) self.assertEqual(request.url, expected_url) @@ -2363,9 +2358,8 @@ def test_accesspoint_supports_custom_endpoint(self): ) request = self.get_s3_accesspoint_request() self.call_set_endpoint(endpoint_setter, request=request) - expected_url = 'https://{}-{}.custom.com/'.format( - self.accesspoint_name, - self.account, + expected_url = ( + f'https://{self.accesspoint_name}-{self.account}.custom.com/' ) self.assertEqual(request.url, 
expected_url) @@ -2395,8 +2389,8 @@ def test_set_endpoint_for_auto(self): ) request = self.get_s3_request(self.bucket, self.key) self.call_set_endpoint(endpoint_setter, request) - expected_url = 'https://{}.s3.us-west-2.amazonaws.com/{}'.format( - self.bucket, self.key + expected_url = ( + f'https://{self.bucket}.s3.us-west-2.amazonaws.com/{self.key}' ) self.assertEqual(request.url, expected_url) @@ -2406,8 +2400,8 @@ def test_set_endpoint_for_virtual(self): ) request = self.get_s3_request(self.bucket, self.key) self.call_set_endpoint(endpoint_setter, request) - expected_url = 'https://{}.s3.us-west-2.amazonaws.com/{}'.format( - self.bucket, self.key + expected_url = ( + f'https://{self.bucket}.s3.us-west-2.amazonaws.com/{self.key}' ) self.assertEqual(request.url, expected_url) @@ -2417,8 +2411,8 @@ def test_set_endpoint_for_path(self): ) request = self.get_s3_request(self.bucket, self.key) self.call_set_endpoint(endpoint_setter, request) - expected_url = 'https://s3.us-west-2.amazonaws.com/{}/{}'.format( - self.bucket, self.key + expected_url = ( + f'https://s3.us-west-2.amazonaws.com/{self.bucket}/{self.key}' ) self.assertEqual(request.url, expected_url) @@ -2428,8 +2422,8 @@ def test_set_endpoint_for_accelerate(self): ) request = self.get_s3_request(self.bucket, self.key) self.call_set_endpoint(endpoint_setter, request) - expected_url = 'https://{}.s3-accelerate.amazonaws.com/{}'.format( - self.bucket, self.key + expected_url = ( + f'https://{self.bucket}.s3-accelerate.amazonaws.com/{self.key}' ) self.assertEqual(request.url, expected_url) diff --git a/tests/unit/test_waiters.py b/tests/unit/test_waiters.py index 527f63b9d3..79dc29be99 100644 --- a/tests/unit/test_waiters.py +++ b/tests/unit/test_waiters.py @@ -189,6 +189,38 @@ def test_single_waiter_supports_error(self): success_acceptor({'Error': {'Code': 'DoesNotExistErorr'}}) ) + def test_single_waiter_supports_no_error(self): + single_waiter = { + 'acceptors': [ + { + 'state': 'success', + 'matcher': 
'error', + 'expected': False, + } + ], + } + single_waiter.update(self.boiler_plate_config) + config = SingleWaiterConfig(single_waiter) + success_acceptor = config.acceptors[0].matcher_func + self.assertTrue(success_acceptor({})) + self.assertFalse(success_acceptor({'Error': {'Code': 'ExampleError'}})) + + def test_single_waiter_supports_any_error(self): + single_waiter = { + 'acceptors': [ + { + 'state': 'success', + 'matcher': 'error', + 'expected': True, + } + ], + } + single_waiter.update(self.boiler_plate_config) + config = SingleWaiterConfig(single_waiter) + success_acceptor = config.acceptors[0].matcher_func + self.assertTrue(success_acceptor({'Error': {'Code': 'ExampleError1'}})) + self.assertTrue(success_acceptor({'Error': {'Code': 'ExampleError2'}})) + def test_unknown_matcher(self): unknown_type = 'arbitrary_type' single_waiter = {