diff --git a/Package.swift b/Package.swift index 9e618ef5e6..a469afdfc1 100644 --- a/Package.swift +++ b/Package.swift @@ -311,6 +311,7 @@ let package = Package( .library(name: "SotoPricing", targets: ["SotoPricing"]), .library(name: "SotoPrivateNetworks", targets: ["SotoPrivateNetworks"]), .library(name: "SotoProton", targets: ["SotoProton"]), + .library(name: "SotoQApps", targets: ["SotoQApps"]), .library(name: "SotoQBusiness", targets: ["SotoQBusiness"]), .library(name: "SotoQConnect", targets: ["SotoQConnect"]), .library(name: "SotoQLDB", targets: ["SotoQLDB"]), @@ -414,7 +415,7 @@ let package = Package( .library(name: "SotoXRay", targets: ["SotoXRay"]) ], dependencies: [ - .package(url: "https://github.com/soto-project/soto-core.git", from: "7.0.0") + .package(url: "https://github.com/soto-project/soto-core.git", from: "7.0.0-rc.1") ], targets: [ .target(name: "SotoACM", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ACM", swiftSettings: swiftSettings), @@ -700,6 +701,7 @@ let package = Package( .target(name: "SotoPricing", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Pricing", swiftSettings: swiftSettings), .target(name: "SotoPrivateNetworks", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PrivateNetworks", swiftSettings: swiftSettings), .target(name: "SotoProton", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Proton", swiftSettings: swiftSettings), + .target(name: "SotoQApps", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/QApps", swiftSettings: swiftSettings), .target(name: "SotoQBusiness", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/QBusiness", swiftSettings: swiftSettings), .target(name: "SotoQConnect", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/QConnect", swiftSettings: swiftSettings), .target(name: "SotoQLDB", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/QLDB", swiftSettings: swiftSettings), diff --git a/Sources/Soto/Services/ACM/ACM_shapes.swift b/Sources/Soto/Services/ACM/ACM_shapes.swift index 8bb984bc27..5062917b50 100644 --- a/Sources/Soto/Services/ACM/ACM_shapes.swift +++ b/Sources/Soto/Services/ACM/ACM_shapes.swift @@ -95,13 +95,13 @@ extension ACM { } public enum KeyAlgorithm: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case ecPrime256V1 = "EC_prime256v1" - case ecSecp384R1 = "EC_secp384r1" - case ecSecp521R1 = "EC_secp521r1" - case rsa1024 = "RSA_1024" - case rsa2048 = "RSA_2048" - case rsa3072 = "RSA_3072" - case rsa4096 = "RSA_4096" + case ecPrime256V1 = "EC-prime256v1" + case ecSecp384R1 = "EC-secp384r1" + case ecSecp521R1 = "EC-secp521r1" + case rsa1024 = "RSA-1024" + case rsa2048 = "RSA-2048" + case rsa3072 = "RSA-3072" + case rsa4096 = "RSA-4096" public var description: String { return self.rawValue } } diff --git a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift index 98c22d341f..c3b9c4f844 100644 --- a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift +++ b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift @@ -70,7 +70,7 @@ public struct ACMPCA: AWSService { serviceName: "ACMPCA", serviceIdentifier: "acm-pca", serviceProtocol: .json(version: "1.1"), - apiVersion: "2017-08-22", + 
apiVersion: "", endpoint: endpoint, variantEndpoints: Self.variantEndpoints, errorType: ACMPCAErrorType.self, @@ -372,8 +372,8 @@ public struct ACMPCA: AWSService { /// certificate, if any, that your root CA signed must be next to last. The /// subordinate certificate signed by the preceding subordinate CA must come next, /// and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA - /// certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an - /// imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions. + /// certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an + /// imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension @Sendable public func importCertificateAuthorityCertificate(_ input: ImportCertificateAuthorityCertificateRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -715,6 +715,7 @@ extension ACMPCA { .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(3), + maxDelayTime: .seconds(180), command: self.describeCertificateAuthorityAuditReport ) return try await self.client.waitUntil(input, waiter: waiter, maxWaitTime: maxWaitTime, logger: logger) @@ -732,6 +733,7 @@ extension ACMPCA { .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(3), + maxDelayTime: .seconds(180), command: self.getCertificateAuthorityCsr ) return try await self.client.waitUntil(input, waiter: waiter, maxWaitTime: maxWaitTime, logger: logger) @@ -749,6 +751,7 @@ extension ACMPCA { .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(1), + maxDelayTime: .seconds(60), command: self.getCertificate ) return try await self.client.waitUntil(input, waiter: waiter, maxWaitTime: maxWaitTime, logger: logger) diff --git a/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_api.swift b/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_api.swift index 87dda657e2..a9d935e72e 100644 --- a/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_api.swift +++ b/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_api.swift @@ -19,24 +19,29 @@ /// Service object for interacting with AWS ARCZonalShift service. 
/// -/// Welcome to the Zonal Shift API Reference Guide for Amazon Route 53 Application Recovery Controller (Route 53 ARC). You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to +/// Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (Route 53 ARC). You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to /// help your application recover quickly from an impairment in an Availability Zone. For example, /// you can recover your application from a developer's bad code deployment or from an -/// Amazon Web Services infrastructure failure in a single Availability Zone. You can also configure zonal autoshift for a load balancer resource. Zonal autoshift -/// is a capability in Route 53 ARC where Amazon Web Services shifts away application resource -/// traffic from an Availability Zone, on your behalf, to help reduce your time to recovery during events. -/// Amazon Web Services shifts away traffic for resources that are enabled for zonal autoshift whenever Amazon Web Services -/// determines that there's an issue in the Availability Zone that could potentially affect -/// customers. To ensure that zonal autoshift is safe for your application, you must +/// Amazon Web Services infrastructure failure in a single Availability Zone. You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift +/// is a capability in Route 53 ARC where you authorize Amazon Web Services to shift away application resource +/// traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. +/// Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability +/// Zone impairment that could potentially impact customers. To help make sure that zonal autoshift is safe for your application, you must /// also configure practice runs when you enable zonal autoshift for a resource. Practice runs start -/// weekly zonal shifts for a resource, to shift -/// traffic for the resource out of an Availability Zone. Practice runs make sure, on a regular basis, -/// that you have enough capacity in all the Availability Zones in an Amazon Web Services Region -/// for your application to continue to operate normally -/// when traffic for a resource is shifted away from one Availability Zone. You must prescale resource capacity in all Availability Zones in the Region -/// where your application is deployed, before you configure practice runs or enable zonal autoshift -/// for a resource. You should not rely on scaling on demand when an autoshift or practice run -/// starts. For more information about using zonal shift and zonal autoshift, see the +/// weekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. +/// Practice runs help you to make sure, on a regular basis, that you have enough capacity in all the +/// Availability Zones in an Amazon Web Services Region for your application to continue to operate normally +/// when traffic for a resource is shifted away from one Availability Zone. Before you configure practice runs or enable zonal autoshift, we strongly recommend +/// that you prescale your application resource capacity in all Availability Zones in the Region where +/// your application resources are deployed. You should not rely on scaling on demand when an +/// autoshift or practice run starts. 
Zonal autoshift, including practice runs, works independently, +/// and does not wait for auto scaling actions to complete. Relying on auto scaling, instead of +/// pre-scaling, can result in loss of availability. If you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure +/// the minimum capacity of your auto scaling to continue operating normally with the loss of an +/// Availability Zone. Be aware that Route 53 ARC does not inspect the health of individual resources. Amazon Web Services only starts an +/// autoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could +/// potentially impact customers. In some cases, resources might be shifted away that are not experiencing +/// impact. For more information about using zonal shift and zonal autoshift, see the /// Amazon Route 53 Application Recovery Controller /// Developer Guide. public struct ARCZonalShift: AWSService { @@ -109,7 +114,9 @@ public struct ARCZonalShift: AWSService { /// A practice run configuration includes specifications for blocked dates and blocked time windows, /// and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an /// outcome alarm, to monitor application health during practice runs and, - /// optionally, a blocking alarm, to block practice runs from starting. For more information, see + /// optionally, a blocking alarm, to block practice runs from starting. When a resource has a practice run configuration, Route 53 ARC starts zonal shifts for the resource + /// weekly, to shift traffic for practice runs. Practice runs help you to ensure that + /// shifting away traffic from an Availability Zone during an autoshift is safe for your application. For more information, see /// /// Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide. @Sendable @@ -139,6 +146,27 @@ public struct ARCZonalShift: AWSService { ) } + /// Returns the status of autoshift observer notification. Autoshift observer + /// notification enables you to be notified, through Amazon EventBridge, when + /// there is an autoshift event for zonal autoshift. If the status is ENABLED, + /// Route 53 ARC includes all autoshift events when you use the EventBridge pattern + /// Autoshift In Progress. When the status is DISABLED, + /// Route 53 ARC includes only autoshift events for autoshifts when one or more of your + /// resources is included in the autoshift. For more information, see + /// + /// Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide. + @Sendable + public func getAutoshiftObserverNotificationStatus(_ input: GetAutoshiftObserverNotificationStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAutoshiftObserverNotificationStatusResponse { + return try await self.client.execute( + operation: "GetAutoshiftObserverNotificationStatus", + path: "/autoshift-observer-notification", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get information about a resource that's been registered for zonal shifts with Amazon Route 53 Application Recovery Controller in this Amazon Web Services Region. Resources that are registered for zonal shifts are managed resources in Route 53 ARC. You can start zonal shifts and configure zonal autoshift for managed resources. 
At this time, you can only start a zonal shift or configure zonal autoshift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. @Sendable public func getManagedResource(_ input: GetManagedResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetManagedResourceResponse { @@ -152,7 +180,10 @@ public struct ARCZonalShift: AWSService { ) } - /// Returns the active autoshifts for a specified resource. + /// Returns a list of autoshifts for an Amazon Web Services Region. By default, the call returns + /// only ACTIVE autoshifts. Optionally, you can specify the status parameter to return + /// COMPLETED autoshifts. + /// @Sendable public func listAutoshifts(_ input: ListAutoshiftsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAutoshiftsResponse { return try await self.client.execute( @@ -178,7 +209,7 @@ public struct ARCZonalShift: AWSService { ) } - /// Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ">ListAutoshifts. + /// Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ">ListAutoshifts. @Sendable public func listZonalShifts(_ input: ListZonalShiftsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListZonalShiftsResponse { return try await self.client.execute( @@ -204,6 +235,27 @@ public struct ARCZonalShift: AWSService { ) } + /// Update the status of autoshift observer notification. Autoshift observer + /// notification enables you to be notified, through Amazon EventBridge, when + /// there is an autoshift event for zonal autoshift. If the status is ENABLED, + /// Route 53 ARC includes all autoshift events when you use the EventBridge pattern + /// Autoshift In Progress. When the status is DISABLED, + /// Route 53 ARC includes only autoshift events for autoshifts when one or more of your + /// resources is included in the autoshift. For more information, see + /// + /// Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide. + @Sendable + public func updateAutoshiftObserverNotificationStatus(_ input: UpdateAutoshiftObserverNotificationStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAutoshiftObserverNotificationStatusResponse { + return try await self.client.execute( + operation: "UpdateAutoshiftObserverNotificationStatus", + path: "/autoshift-observer-notification", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update a practice run configuration to change one or more of the following: add, /// change, or remove the blocking alarm; change the outcome alarm; or add, change, /// or remove blocking dates or time windows. 
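Editor's aside, not part of the generated patch: a hedged sketch of how the ARCZonalShift operations touched above (the new GetAutoshiftObserverNotificationStatus and the existing StartZonalShift) might be driven through Soto. The client setup, Region, resource ARN, and shift duration are illustrative assumptions rather than values taken from this diff; only request and response members that appear in the patch are used.

import SotoARCZonalShift

func exampleZonalShiftCalls() async throws {
    // AWSClient() with defaults is assumed to match the soto-core 7 initializer this package targets.
    let client = AWSClient()
    let arcZonalShift = ARCZonalShift(client: client, region: .useast1)

    // Check whether autoshift observer notifications are enabled (new operation in this patch).
    let notification = try await arcZonalShift.getAutoshiftObserverNotificationStatus(.init())
    print("Autoshift observer notification status: \(notification.status)")

    // Start a customer-initiated zonal shift away from one Availability Zone for two hours.
    let shift = try await arcZonalShift.startZonalShift(.init(
        awayFrom: "use1-az1",
        comment: "Shift traffic away while investigating an impairment",
        expiresIn: "2h",
        resourceIdentifier: "arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/example/1234567890abcdef" // hypothetical ARN
    ))
    print("Zonal shift active until \(shift.expiryTime)")

    try await client.shutdown()
}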
@@ -219,10 +271,13 @@ public struct ARCZonalShift: AWSService { ) } - /// You can update the zonal autoshift status for a resource, to enable or disable zonal - /// autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away - /// resource traffic from an Availability Zone, on your behalf, when Amazon Web Services - /// determines that there's an issue in the Availability Zone that could potentially affect customers. + /// The zonal autoshift configuration for a resource includes the practice run configuration and the status for + /// running autoshifts, zonal autoshift status. When a resource has a practice run configuration, Route 53 ARC + /// starts weekly zonal shifts for the resource, to shift traffic away from an Availability Zone. Weekly practice + /// runs help you to make sure that your application can continue to operate normally with the loss of one Availability Zone. You can update the zonal autoshift status to enable or disable zonal autoshift. When zonal + /// autoshift is ENABLED, you authorize Amazon Web Services to shift away resource traffic for + /// an application from an Availability Zone during events, on your behalf, to help reduce time to recovery. + /// Traffic is also shifted away for the required weekly practice runs. @Sendable public func updateZonalAutoshiftConfiguration(_ input: UpdateZonalAutoshiftConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateZonalAutoshiftConfigurationResponse { return try await self.client.execute( @@ -262,7 +317,10 @@ extension ARCZonalShift { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension ARCZonalShift { - /// Returns the active autoshifts for a specified resource. + /// Returns a list of autoshifts for an Amazon Web Services Region. By default, the call returns + /// only ACTIVE autoshifts. Optionally, you can specify the status parameter to return + /// COMPLETED autoshifts. + /// /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -300,7 +358,7 @@ extension ARCZonalShift { ) } - /// Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ">ListAutoshifts. + /// Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ">ListAutoshifts. /// Return PaginatorSequence for operation.
/// /// - Parameters: diff --git a/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_shapes.swift b/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_shapes.swift index 67776eeff2..0a8fcf5e7a 100644 --- a/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_shapes.swift +++ b/Sources/Soto/Services/ARCZonalShift/ARCZonalShift_shapes.swift @@ -44,6 +44,12 @@ extension ARCZonalShift { public var description: String { return self.rawValue } } + public enum AutoshiftObserverNotificationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ControlConditionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cloudwatch = "CLOUDWATCH" public var description: String { return self.rawValue } @@ -74,17 +80,17 @@ extension ARCZonalShift { public struct AutoshiftInResource: AWSDecodableShape { /// The appliedStatus field specifies which application traffic shift is in effect for a - /// resource when there is more than one traffic shift active. There can be more than one application traffic - /// shift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, - /// or an autoshift. The appliedStatus field for an autoshift for a resource can have one of two - /// values: APPLIED or NOT_APPLIED. The zonal shift or autoshift - /// that is currently in effect for the resource has an applied status set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence - /// autoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run + /// resource when there is more than one active traffic shift. There can be more than one application traffic + /// shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, + /// or an autoshift. The appliedStatus field for a shift that is in progress for a resource can + /// have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift + /// that is currently in effect for the resource has an appliedStatus set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence + /// autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run /// zonal shifts. For more information, see /// How zonal autoshift /// and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide. public let appliedStatus: AutoshiftAppliedStatus - /// The Availability Zone that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. + /// The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. /// Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. /// An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when /// you disable zonal autoshift for the resource. @@ -106,7 +112,7 @@ extension ARCZonalShift { } public struct AutoshiftSummary: AWSDecodableShape { - /// The Availability Zone that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. 
+ /// The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. /// Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. /// An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when /// you disable zonal autoshift for the resource. @@ -157,9 +163,10 @@ extension ARCZonalShift { } public struct ControlCondition: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) for the Amazon CloudWatch alarm that you specify as a control condition for a practice run. + /// The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run. public let alarmIdentifier: String - /// The type of alarm specified for a practice run. The only valid value is CLOUDWATCH. + /// The type of alarm specified for a practice run. You can only specify Amazon CloudWatch alarms for practice runs, so the + /// only valid value is CLOUDWATCH. public let type: ControlConditionType public init(alarmIdentifier: String, type: ControlConditionType) { @@ -209,8 +216,8 @@ extension ARCZonalShift { /// if your application is impacted by the zonal shift, and you want to stop the /// zonal shift, to let traffic for the resource return to the Availability Zone. public let outcomeAlarms: [ControlCondition] - /// The identifier of the resource to shift away traffic for when a practice - /// run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. + /// The identifier of the resource that Amazon Web Services shifts traffic for with a practice + /// run zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. public let resourceIdentifier: String public init(blockedDates: [String]? = nil, blockedWindows: [String]? = nil, blockingAlarms: [ControlCondition]? = nil, outcomeAlarms: [ControlCondition], resourceIdentifier: String) { @@ -267,11 +274,11 @@ extension ARCZonalShift { /// outcome alarm that you specify for practice runs, and, optionally, a /// blocking alarm and blocking dates and windows. public let practiceRunConfiguration: PracticeRunConfiguration - /// The status for zonal autoshift for a resource. When you specify the - /// autoshift status as ENABLED, Amazon Web Services shifts traffic + /// The status for zonal autoshift for a resource. When you specify ENABLED + /// for the autoshift status, Amazon Web Services shifts traffic /// away from shifts away application resource traffic from an Availability Zone, - /// on your behalf, when Amazon Web Services determines that there's an issue in - /// the Availability Zone that could potentially affect customers. When you enable zonal autoshift, you must also configure practice runs for + /// on your behalf, when internal telemetry indicates that there is an Availability + /// Zone impairment that could potentially impact customers. When you enable zonal autoshift, you must also configure practice runs for /// the resource. 
public let zonalAutoshiftStatus: ZonalAutoshiftStatus @@ -336,8 +343,29 @@ extension ARCZonalShift { } } + public struct GetAutoshiftObserverNotificationStatusRequest: AWSEncodableShape { + public init() {} + } + + public struct GetAutoshiftObserverNotificationStatusResponse: AWSDecodableShape { + /// The status of autoshift observer notification. If the status is ENABLED, + /// Route 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern + /// Autoshift In Progress. When the status is DISABLED, + /// Route 53 ARC includes only autoshift events for autoshifts when one or more of your + /// resources is included in the autoshift. + public let status: AutoshiftObserverNotificationStatus + + public init(status: AutoshiftObserverNotificationStatus) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + public struct GetManagedResourceRequest: AWSEncodableShape { - /// The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. + /// The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. public let resourceIdentifier: String public init(resourceIdentifier: String) { @@ -625,7 +653,7 @@ extension ARCZonalShift { } public struct StartZonalShiftRequest: AWSEncodableShape { - /// The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. + /// The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. public let awayFrom: String /// A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string. public let comment: String @@ -634,7 +662,7 @@ extension ARCZonalShift { /// zonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to /// restore traffic to the Availability Zone. To set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space: A lowercase letter m: To specify that the value is in minutes. A lowercase letter h: To specify that the value is in hours. For example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours). public let expiresIn: String - /// The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. + /// The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. 
At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. public let resourceIdentifier: String public init(awayFrom: String, comment: String, expiresIn: String, resourceIdentifier: String) { @@ -662,6 +690,36 @@ extension ARCZonalShift { } } + public struct UpdateAutoshiftObserverNotificationStatusRequest: AWSEncodableShape { + /// The status to set for autoshift observer notification. If the status is ENABLED, + /// Route 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern + /// Autoshift In Progress. When the status is DISABLED, + /// Route 53 ARC includes only autoshift events for autoshifts when one or more of your + /// resources is included in the autoshift. + public let status: AutoshiftObserverNotificationStatus + + public init(status: AutoshiftObserverNotificationStatus) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + + public struct UpdateAutoshiftObserverNotificationStatusResponse: AWSDecodableShape { + /// The status for autoshift observer notification. + public let status: AutoshiftObserverNotificationStatus + + public init(status: AutoshiftObserverNotificationStatus) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + public struct UpdatePracticeRunConfigurationRequest: AWSEncodableShape { /// Add, change, or remove blocked dates for a practice run in zonal autoshift. Optionally, you can block practice runs for specific calendar dates. /// The format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, @@ -774,7 +832,9 @@ extension ARCZonalShift { /// configuration for. The identifier is the Amazon Resource Name (ARN) for the resource. public let resourceIdentifier: String /// The zonal autoshift status for the resource that you want to update the zonal - /// autoshift configuration for. + /// autoshift configuration for. Choose ENABLED to authorize Amazon Web Services + /// to shift away resource traffic for an application from an Availability Zone during events, + /// on your behalf, to help reduce time to recovery. public let zonalAutoshiftStatus: ZonalAutoshiftStatus public init(resourceIdentifier: String, zonalAutoshiftStatus: ZonalAutoshiftStatus) { @@ -803,8 +863,7 @@ extension ARCZonalShift { /// The identifier for the resource that you updated the zonal autoshift /// configuration for. The identifier is the Amazon Resource Name (ARN) for the resource. public let resourceIdentifier: String - /// The zonal autoshift status for the resource that you updated the zonal - /// autoshift configuration for. + /// The updated zonal autoshift status for the resource. public let zonalAutoshiftStatus: ZonalAutoshiftStatus public init(resourceIdentifier: String, zonalAutoshiftStatus: ZonalAutoshiftStatus) { @@ -857,18 +916,18 @@ extension ARCZonalShift { } public struct ZonalShift: AWSDecodableShape { - /// The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. + /// The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. /// Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. public let awayFrom: String /// A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. 
A new comment overwrites any existing comment string. public let comment: String - /// The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. + /// The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. /// You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift /// to set a new expiration at any time. When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts /// to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or /// just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in. public let expiryTime: Date - /// The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. + /// The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off. public let resourceIdentifier: String /// The time (UTC) when the zonal shift starts. public let startTime: Date @@ -899,13 +958,13 @@ extension ARCZonalShift { } public struct ZonalShiftInResource: AWSDecodableShape { - /// The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one traffic shift active. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, or an autoshift. The appliedStatus field for an autoshift for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an applied status set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run zonal shifts. For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide. + /// The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts. 
For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide. public let appliedStatus: AppliedStatus - /// The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. + /// The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. public let awayFrom: String - /// A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string. + /// A comment that you enter for a customer-initiated zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string. public let comment: String - /// The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time. When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in. + /// The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time. When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in. public let expiryTime: Date /// The outcome, or end state, returned for a practice run. The following values can be returned: PENDING: Outcome value when a practice run is in progress. SUCCEEDED: Outcome value when the outcome alarm specified for /// the practice run configuration does not go into an ALARM state during the practice run, and the practice run @@ -947,11 +1006,11 @@ extension ARCZonalShift { } public struct ZonalShiftSummary: AWSDecodableShape { - /// The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. + /// The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region. 
public let awayFrom: String /// A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string. public let comment: String - /// The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time. When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in. + /// The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time. When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in. public let expiryTime: Date /// The outcome, or end state, of a practice run. The following values can be returned: PENDING: Outcome value when the practice run is in progress. SUCCEEDED: Outcome value when the outcome alarm specified for /// the practice run configuration does not go into an ALARM state during the practice run, and the practice run diff --git a/Sources/Soto/Services/Batch/Batch_shapes.swift b/Sources/Soto/Services/Batch/Batch_shapes.swift index 7d421ce12b..f89edd5b5d 100644 --- a/Sources/Soto/Services/Batch/Batch_shapes.swift +++ b/Sources/Soto/Services/Batch/Batch_shapes.swift @@ -1642,6 +1642,8 @@ extension Batch { public struct EksAttemptDetail: AWSDecodableShape { /// The details for the final status of the containers for this job attempt. public let containers: [EksAttemptContainerDetail]? + /// The Amazon Resource Name (ARN) of the Amazon EKS cluster. + public let eksClusterArn: String? /// The details for the init containers. public let initContainers: [EksAttemptContainerDetail]? /// The name of the node for this job attempt. @@ -1655,8 +1657,9 @@ extension Batch { /// The Unix timestamp (in milliseconds) for when the attempt was stopped. This happens when the attempt transitioned from the RUNNING state to a terminal state, such as SUCCEEDED or FAILED. public let stoppedAt: Int64? - public init(containers: [EksAttemptContainerDetail]? = nil, initContainers: [EksAttemptContainerDetail]? = nil, nodeName: String? = nil, podName: String? = nil, startedAt: Int64? = nil, statusReason: String? = nil, stoppedAt: Int64? = nil) { + public init(containers: [EksAttemptContainerDetail]? = nil, eksClusterArn: String? = nil, initContainers: [EksAttemptContainerDetail]? = nil, nodeName: String? = nil, podName: String? = nil, startedAt: Int64? = nil, statusReason: String? = nil, stoppedAt: Int64? 
= nil) { self.containers = containers + self.eksClusterArn = eksClusterArn self.initContainers = initContainers self.nodeName = nodeName self.podName = podName @@ -1667,6 +1670,7 @@ extension Batch { private enum CodingKeys: String, CodingKey { case containers = "containers" + case eksClusterArn = "eksClusterArn" case initContainers = "initContainers" case nodeName = "nodeName" case podName = "podName" @@ -3043,6 +3047,12 @@ extension Batch { self.numNodes = numNodes } + public func validate(name: String) throws { + try self.nodePropertyOverrides?.forEach { + try $0.validate(name: "\(name).nodePropertyOverrides[]") + } + } + private enum CodingKeys: String, CodingKey { case nodePropertyOverrides = "nodePropertyOverrides" case numNodes = "numNodes" @@ -3063,6 +3073,12 @@ extension Batch { self.numNodes = numNodes } + public func validate(name: String) throws { + try self.nodeRangeProperties?.forEach { + try $0.validate(name: "\(name).nodeRangeProperties[]") + } + } + private enum CodingKeys: String, CodingKey { case mainNode = "mainNode" case nodeRangeProperties = "nodeRangeProperties" @@ -3096,21 +3112,29 @@ extension Batch { public let containerOverrides: ContainerOverrides? /// An object that contains the properties that you want to replace for the existing Amazon ECS resources of a job. public let ecsPropertiesOverride: EcsPropertiesOverride? + /// An object that contains the properties that you want to replace for the existing Amazon EKS resources of a job. + public let eksPropertiesOverride: EksPropertiesOverride? /// An object that contains the instance types that you want to replace for the existing resources of a job. public let instanceTypes: [String]? /// The range of nodes, using node index values, that's used to override. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range. public let targetNodes: String? - public init(containerOverrides: ContainerOverrides? = nil, ecsPropertiesOverride: EcsPropertiesOverride? = nil, instanceTypes: [String]? = nil, targetNodes: String? = nil) { + public init(containerOverrides: ContainerOverrides? = nil, ecsPropertiesOverride: EcsPropertiesOverride? = nil, eksPropertiesOverride: EksPropertiesOverride? = nil, instanceTypes: [String]? = nil, targetNodes: String? = nil) { self.containerOverrides = containerOverrides self.ecsPropertiesOverride = ecsPropertiesOverride + self.eksPropertiesOverride = eksPropertiesOverride self.instanceTypes = instanceTypes self.targetNodes = targetNodes } + public func validate(name: String) throws { + try self.eksPropertiesOverride?.validate(name: "\(name).eksPropertiesOverride") + } + private enum CodingKeys: String, CodingKey { case containerOverrides = "containerOverrides" case ecsPropertiesOverride = "ecsPropertiesOverride" + case eksPropertiesOverride = "eksPropertiesOverride" case instanceTypes = "instanceTypes" case targetNodes = "targetNodes" } @@ -3121,21 +3145,29 @@ extension Batch { public let container: ContainerProperties? /// This is an object that represents the properties of the node range for a multi-node parallel job. public let ecsProperties: EcsProperties? + /// This is an object that represents the properties of the node range for a multi-node parallel job. + public let eksProperties: EksProperties? /// The instance types of the underlying host infrastructure of a multi-node parallel job. 
This parameter isn't applicable to jobs that are running on Fargate resources. In addition, this list object is currently limited to one element. public let instanceTypes: [String]? /// The range of nodes, using node index values. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes (0:n). You can nest node ranges (for example, 0:10 and 4:5). In this case, the 4:5 range properties override the 0:10 properties. public let targetNodes: String? - public init(container: ContainerProperties? = nil, ecsProperties: EcsProperties? = nil, instanceTypes: [String]? = nil, targetNodes: String? = nil) { + public init(container: ContainerProperties? = nil, ecsProperties: EcsProperties? = nil, eksProperties: EksProperties? = nil, instanceTypes: [String]? = nil, targetNodes: String? = nil) { self.container = container self.ecsProperties = ecsProperties + self.eksProperties = eksProperties self.instanceTypes = instanceTypes self.targetNodes = targetNodes } + public func validate(name: String) throws { + try self.eksProperties?.validate(name: "\(name).eksProperties") + } + private enum CodingKeys: String, CodingKey { case container = "container" case ecsProperties = "ecsProperties" + case eksProperties = "eksProperties" case instanceTypes = "instanceTypes" case targetNodes = "targetNodes" } @@ -3187,6 +3219,7 @@ extension Batch { public func validate(name: String) throws { try self.eksProperties?.validate(name: "\(name).eksProperties") + try self.nodeProperties?.validate(name: "\(name).nodeProperties") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -3427,6 +3460,7 @@ extension Batch { public func validate(name: String) throws { try self.eksPropertiesOverride?.validate(name: "\(name).eksPropertiesOverride") + try self.nodeOverrides?.validate(name: "\(name).nodeOverrides") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 71413f7140..9a3dab3b3d 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -124,7 +124,7 @@ public struct Bedrock: AWSService { ) } - /// Creates a guardrail to block topics and to filter out harmful content. Specify a name and optional description. Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name and description so that the guardrail can properly identify the topic. Specify DENY in the type field. (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list. Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. 
For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources. + /// Creates a guardrail to block topics and to implement safeguards for your generative AI applications. You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter out denied topics and words, and remove sensitive information for privacy protection. Content filters - Adjust filter strengths to block input prompts or model responses containing harmful content. Denied topics - Define a set of topics that are undesirable in the context of your application. These topics will be blocked if detected in user queries or model responses. Word filters - Configure filters to block undesirable words, phrases, and profanity. Such words can include offensive terms, competitor names etc. Sensitive information filters - Block or mask sensitive information such as personally identifiable information (PII) or custom regex in user inputs and model responses. In addition to the above policies, you can also configure the messages to be returned to the user if a user input or model response is in violation of the policies defined in the guardrail. For more information, see Guardrails for Amazon Bedrock in the Amazon Bedrock User Guide. @Sendable public func createGuardrail(_ input: CreateGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGuardrailResponse { return try await self.client.execute( @@ -475,7 +475,7 @@ public struct Bedrock: AWSService { ) } - /// Updates a guardrail with the values you specify. Specify a name and optional description. Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name and description so that the guardrail can properly identify the topic. Specify DENY in the type field. (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list. Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources. + /// Updates a guardrail with the values you specify. Specify a name and optional description. Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. 
Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name and description so that the guardrail can properly identify the topic. Specify DENY in the type field. (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list. Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, include the ARN of a KMS key in the kmsKeyId field. @Sendable public func updateGuardrail(_ input: UpdateGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGuardrailResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift index b16f359359..26e79279c8 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift @@ -87,6 +87,12 @@ extension Bedrock { public var description: String { return self.rawValue } } + public enum GuardrailContextualGroundingFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case grounding = "GROUNDING" + case relevance = "RELEVANCE" + public var description: String { return self.rawValue } + } + public enum GuardrailFilterStrength: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case high = "HIGH" case low = "LOW" @@ -406,6 +412,8 @@ extension Bedrock { public let clientRequestToken: String? /// The content filter policies to configure for the guardrail. public let contentPolicyConfig: GuardrailContentPolicyConfig? + /// The contextual grounding policy configuration used to create a guardrail. + public let contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? /// A description of the guardrail. public let description: String? /// The ARN of the KMS key that you use to encrypt the guardrail. @@ -421,11 +429,12 @@ extension Bedrock { /// The word policy you configure for the guardrail. public let wordPolicyConfig: GuardrailWordPolicyConfig? - public init(blockedInputMessaging: String, blockedOutputsMessaging: String, clientRequestToken: String? = CreateGuardrailRequest.idempotencyToken(), contentPolicyConfig: GuardrailContentPolicyConfig? = nil, description: String? = nil, kmsKeyId: String? = nil, name: String, sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? = nil, tags: [Tag]? = nil, topicPolicyConfig: GuardrailTopicPolicyConfig? = nil, wordPolicyConfig: GuardrailWordPolicyConfig? = nil) { + public init(blockedInputMessaging: String, blockedOutputsMessaging: String, clientRequestToken: String? = CreateGuardrailRequest.idempotencyToken(), contentPolicyConfig: GuardrailContentPolicyConfig? = nil, contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? = nil, description: String? = nil, kmsKeyId: String? = nil, name: String, sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? 
= nil, tags: [Tag]? = nil, topicPolicyConfig: GuardrailTopicPolicyConfig? = nil, wordPolicyConfig: GuardrailWordPolicyConfig? = nil) { self.blockedInputMessaging = blockedInputMessaging self.blockedOutputsMessaging = blockedOutputsMessaging self.clientRequestToken = clientRequestToken self.contentPolicyConfig = contentPolicyConfig + self.contextualGroundingPolicyConfig = contextualGroundingPolicyConfig self.description = description self.kmsKeyId = kmsKeyId self.name = name @@ -444,6 +453,7 @@ extension Bedrock { try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") try self.contentPolicyConfig?.validate(name: "\(name).contentPolicyConfig") + try self.contextualGroundingPolicyConfig?.validate(name: "\(name).contextualGroundingPolicyConfig") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) @@ -466,6 +476,7 @@ extension Bedrock { case blockedOutputsMessaging = "blockedOutputsMessaging" case clientRequestToken = "clientRequestToken" case contentPolicyConfig = "contentPolicyConfig" + case contextualGroundingPolicyConfig = "contextualGroundingPolicyConfig" case description = "description" case kmsKeyId = "kmsKeyId" case name = "name" @@ -480,11 +491,11 @@ extension Bedrock { /// The time at which the guardrail was created. @CustomCoding public var createdAt: Date - /// The ARN of the guardrail that was created. + /// The ARN of the guardrail. public let guardrailArn: String /// The unique identifier of the guardrail that was created. public let guardrailId: String - /// The version of the guardrail that was created. This value should be 1. + /// The version of the guardrail that was created. This value will always be DRAFT. public let version: String public init(createdAt: Date, guardrailArn: String, guardrailId: String, version: String) { @@ -507,7 +518,7 @@ extension Bedrock { public let clientRequestToken: String? /// A description of the guardrail version. public let description: String? - /// The unique identifier of the guardrail. + /// The unique identifier of the guardrail. This can be an ID or the ARN. public let guardrailIdentifier: String public init(clientRequestToken: String? = CreateGuardrailVersionRequest.idempotencyToken(), description: String? = nil, guardrailIdentifier: String) { @@ -792,7 +803,7 @@ extension Bedrock { } public struct DeleteGuardrailRequest: AWSEncodableShape { - /// The unique identifier of the guardrail. + /// The unique identifier of the guardrail. This can be an ID or the ARN. public let guardrailIdentifier: String /// The version of the guardrail. public let guardrailVersion: String? @@ -1315,7 +1326,7 @@ extension Bedrock { } public struct GetGuardrailRequest: AWSEncodableShape { - /// The unique identifier of the guardrail for which to get details. + /// The unique identifier of the guardrail for which to get details. This can be an ID or the ARN. public let guardrailIdentifier: String /// The version of the guardrail for which to get details. If you don't specify a version, the response returns details for the DRAFT version. public let guardrailVersion: String? @@ -1348,6 +1359,8 @@ extension Bedrock { public let blockedOutputsMessaging: String /// The content policy that was configured for the guardrail. 
public let contentPolicy: GuardrailContentPolicy? + /// The contextual grounding policy used in the guardrail. + public let contextualGroundingPolicy: GuardrailContextualGroundingPolicy? /// The date and time at which the guardrail was created. @CustomCoding public var createdAt: Date @@ -1355,7 +1368,7 @@ extension Bedrock { public let description: String? /// Appears if the status of the guardrail is FAILED. A list of recommendations to carry out before retrying the request. public let failureRecommendations: [String]? - /// The ARN of the guardrail that was created. + /// The ARN of the guardrail. public let guardrailArn: String /// The unique identifier of the guardrail. public let guardrailId: String @@ -1379,10 +1392,11 @@ extension Bedrock { /// The word policy that was configured for the guardrail. public let wordPolicy: GuardrailWordPolicy? - public init(blockedInputMessaging: String, blockedOutputsMessaging: String, contentPolicy: GuardrailContentPolicy? = nil, createdAt: Date, description: String? = nil, failureRecommendations: [String]? = nil, guardrailArn: String, guardrailId: String, kmsKeyArn: String? = nil, name: String, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicy? = nil, status: GuardrailStatus, statusReasons: [String]? = nil, topicPolicy: GuardrailTopicPolicy? = nil, updatedAt: Date, version: String, wordPolicy: GuardrailWordPolicy? = nil) { + public init(blockedInputMessaging: String, blockedOutputsMessaging: String, contentPolicy: GuardrailContentPolicy? = nil, contextualGroundingPolicy: GuardrailContextualGroundingPolicy? = nil, createdAt: Date, description: String? = nil, failureRecommendations: [String]? = nil, guardrailArn: String, guardrailId: String, kmsKeyArn: String? = nil, name: String, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicy? = nil, status: GuardrailStatus, statusReasons: [String]? = nil, topicPolicy: GuardrailTopicPolicy? = nil, updatedAt: Date, version: String, wordPolicy: GuardrailWordPolicy? = nil) { self.blockedInputMessaging = blockedInputMessaging self.blockedOutputsMessaging = blockedOutputsMessaging self.contentPolicy = contentPolicy + self.contextualGroundingPolicy = contextualGroundingPolicy self.createdAt = createdAt self.description = description self.failureRecommendations = failureRecommendations @@ -1403,6 +1417,7 @@ extension Bedrock { case blockedInputMessaging = "blockedInputMessaging" case blockedOutputsMessaging = "blockedOutputsMessaging" case contentPolicy = "contentPolicy" + case contextualGroundingPolicy = "contextualGroundingPolicy" case createdAt = "createdAt" case description = "description" case failureRecommendations = "failureRecommendations" @@ -1713,6 +1728,70 @@ extension Bedrock { } } + public struct GuardrailContextualGroundingFilter: AWSDecodableShape { + /// The threshold details for the guardrails contextual grounding filter. + public let threshold: Double + /// The filter type details for the guardrails contextual grounding filter. + public let type: GuardrailContextualGroundingFilterType + + public init(threshold: Double, type: GuardrailContextualGroundingFilterType) { + self.threshold = threshold + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case threshold = "threshold" + case type = "type" + } + } + + public struct GuardrailContextualGroundingFilterConfig: AWSEncodableShape { + /// The threshold details for the guardrails contextual grounding filter. 
+ public let threshold: Double + /// The filter details for the guardrails contextual grounding filter. + public let type: GuardrailContextualGroundingFilterType + + public init(threshold: Double, type: GuardrailContextualGroundingFilterType) { + self.threshold = threshold + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case threshold = "threshold" + case type = "type" + } + } + + public struct GuardrailContextualGroundingPolicy: AWSDecodableShape { + /// The filter details for the guardrails contextual grounding policy. + public let filters: [GuardrailContextualGroundingFilter] + + public init(filters: [GuardrailContextualGroundingFilter]) { + self.filters = filters + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + } + } + + public struct GuardrailContextualGroundingPolicyConfig: AWSEncodableShape { + /// The filter configuration details for the guardrails contextual grounding policy. + public let filtersConfig: [GuardrailContextualGroundingFilterConfig] + + public init(filtersConfig: [GuardrailContextualGroundingFilterConfig]) { + self.filtersConfig = filtersConfig + } + + public func validate(name: String) throws { + try self.validate(self.filtersConfig, name: "filtersConfig", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case filtersConfig = "filtersConfig" + } + } + public struct GuardrailManagedWords: AWSDecodableShape { /// ManagedWords$type The managed word type that was configured for the guardrail. (For now, we only offer profanity word list) public let type: GuardrailManagedWordsType @@ -1759,7 +1838,7 @@ extension Bedrock { public struct GuardrailPiiEntityConfig: AWSEncodableShape { /// Configure guardrail action when the PII entity is detected. public let action: GuardrailSensitiveInformationAction - /// Configure guardrail type when the PII entity is detected. + /// Configure guardrail type when the PII entity is detected. The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. AGE An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," Guardrails recognizes "40 years" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. For example, guardrails recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. PASSWORD An alphanumeric string that is used as a password, such as "*very20special#pass*". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters. LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers.
The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. Finance CREDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. Guardrails recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. These are typically nine digits long. US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contains a "7" or "8" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm.
UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 777 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits. UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use regular expressions to define patterns for a guardrail to recognize and act upon, such as a serial number, booking ID, etc. public let type: GuardrailPiiEntityType public init(action: GuardrailSensitiveInformationAction, type: GuardrailPiiEntityType) { @@ -2366,7 +2445,7 @@ extension Bedrock { } public struct ListGuardrailsRequest: AWSEncodableShape { - /// The unique identifier of the guardrail. + /// The unique identifier of the guardrail. This can be an ID or the ARN. public let guardrailIdentifier: String? /// The maximum number of results to return in the response. public let maxResults: Int? @@ -2995,9 +3074,11 @@ extension Bedrock { public let blockedOutputsMessaging: String /// The content policy to configure for the guardrail. public let contentPolicyConfig: GuardrailContentPolicyConfig? + /// The contextual grounding policy configuration used to update a guardrail. + public let contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? /// A description of the guardrail. public let description: String? - /// The unique identifier of the guardrail + /// The unique identifier of the guardrail. This can be an ID or the ARN. public let guardrailIdentifier: String /// The ARN of the KMS key with which to encrypt the guardrail. public let kmsKeyId: String? @@ -3010,10 +3091,11 @@ extension Bedrock { /// The word policy to configure for the guardrail. public let wordPolicyConfig: GuardrailWordPolicyConfig? - public init(blockedInputMessaging: String, blockedOutputsMessaging: String, contentPolicyConfig: GuardrailContentPolicyConfig? = nil, description: String? = nil, guardrailIdentifier: String, kmsKeyId: String? = nil, name: String, sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? = nil, topicPolicyConfig: GuardrailTopicPolicyConfig? = nil, wordPolicyConfig: GuardrailWordPolicyConfig? = nil) { + public init(blockedInputMessaging: String, blockedOutputsMessaging: String, contentPolicyConfig: GuardrailContentPolicyConfig? = nil, contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? = nil, description: String? = nil, guardrailIdentifier: String, kmsKeyId: String? = nil, name: String, sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? = nil, topicPolicyConfig: GuardrailTopicPolicyConfig? = nil, wordPolicyConfig: GuardrailWordPolicyConfig?
= nil) { self.blockedInputMessaging = blockedInputMessaging self.blockedOutputsMessaging = blockedOutputsMessaging self.contentPolicyConfig = contentPolicyConfig + self.contextualGroundingPolicyConfig = contextualGroundingPolicyConfig self.description = description self.guardrailIdentifier = guardrailIdentifier self.kmsKeyId = kmsKeyId @@ -3029,6 +3111,7 @@ extension Bedrock { try container.encode(self.blockedInputMessaging, forKey: .blockedInputMessaging) try container.encode(self.blockedOutputsMessaging, forKey: .blockedOutputsMessaging) try container.encodeIfPresent(self.contentPolicyConfig, forKey: .contentPolicyConfig) + try container.encodeIfPresent(self.contextualGroundingPolicyConfig, forKey: .contextualGroundingPolicyConfig) try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.guardrailIdentifier, key: "guardrailIdentifier") try container.encodeIfPresent(self.kmsKeyId, forKey: .kmsKeyId) @@ -3044,6 +3127,7 @@ extension Bedrock { try self.validate(self.blockedOutputsMessaging, name: "blockedOutputsMessaging", parent: name, max: 500) try self.validate(self.blockedOutputsMessaging, name: "blockedOutputsMessaging", parent: name, min: 1) try self.contentPolicyConfig?.validate(name: "\(name).contentPolicyConfig") + try self.contextualGroundingPolicyConfig?.validate(name: "\(name).contextualGroundingPolicyConfig") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, max: 2048) @@ -3063,6 +3147,7 @@ extension Bedrock { case blockedInputMessaging = "blockedInputMessaging" case blockedOutputsMessaging = "blockedOutputsMessaging" case contentPolicyConfig = "contentPolicyConfig" + case contextualGroundingPolicyConfig = "contextualGroundingPolicyConfig" case description = "description" case kmsKeyId = "kmsKeyId" case name = "name" @@ -3073,7 +3158,7 @@ extension Bedrock { } public struct UpdateGuardrailResponse: AWSDecodableShape { - /// The ARN of the guardrail that was created. + /// The ARN of the guardrail. public let guardrailArn: String /// The unique identifier of the guardrail public let guardrailId: String diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift index c0673b29e9..de37812953 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift @@ -87,7 +87,7 @@ public struct BedrockAgent: AWSService { ) } - /// Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers. Specify the following fields for security purposes. agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. 
If you agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot. + /// Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers. Specify the following fields for security purposes. agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session. To enable your agent to retain conversational context across multiple sessions, include a memoryConfiguration object. For more information, see Configure memory. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot. @Sendable public func createAgent(_ input: CreateAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAgentResponse { return try await self.client.execute( @@ -100,7 +100,7 @@ public struct BedrockAgent: AWSService { ) } - /// Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them. To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information. + /// Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them. To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.UserInput. To allow your agent to generate, run, and troubleshoot code when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.
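Stepping back to the Bedrock guardrail changes earlier in this diff, the new contextual grounding pieces (GuardrailContextualGroundingFilterConfig, GuardrailContextualGroundingPolicyConfig, and the contextualGroundingPolicyConfig parameter added to CreateGuardrailRequest) compose as follows. This is a minimal sketch rather than part of the diff: the SotoBedrock module name follows the package's naming convention but is assumed here, and the thresholds, messages, and guardrail name are illustrative only.

```swift
import SotoBedrock  // assumed module name for the Bedrock service target

// Sketch only: creates a guardrail whose contextual grounding policy requires
// responses to be grounded in source material and relevant to the user's query.
func createGroundedGuardrail(_ bedrock: Bedrock) async throws -> String {
    let groundingPolicy = Bedrock.GuardrailContextualGroundingPolicyConfig(
        filtersConfig: [
            .init(threshold: 0.7, type: .grounding),  // illustrative threshold
            .init(threshold: 0.7, type: .relevance)   // illustrative threshold
        ]
    )
    let request = Bedrock.CreateGuardrailRequest(
        blockedInputMessaging: "Sorry, I can't help with that request.",
        blockedOutputsMessaging: "Sorry, I can't help with that request.",
        contextualGroundingPolicyConfig: groundingPolicy,
        name: "grounded-guardrail"
    )
    let response = try await bedrock.createGuardrail(request)
    // Per the updated doc comment above, response.version is always DRAFT here.
    return response.guardrailId
}
```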
@Sendable public func createAgentActionGroup(_ input: CreateAgentActionGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAgentActionGroupResponse { return try await self.client.execute( @@ -126,7 +126,7 @@ public struct BedrockAgent: AWSService { ) } - /// Sets up a data source to be added to a knowledge base. You can't change the chunkingConfiguration after you create the data source. + /// Creates a data source connector for a knowledge base. You can't change the chunkingConfiguration after you create the data source connector. @Sendable public func createDataSource(_ input: CreateDataSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataSourceResponse { return try await self.client.execute( @@ -139,6 +139,45 @@ public struct BedrockAgent: AWSService { ) } + /// Creates a prompt flow that you can use to send an input through various steps to yield an output. Configure nodes, each of which corresponds to a step of the flow, and create connections between the nodes to create paths to different outputs. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func createFlow(_ input: CreateFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFlowResponse { + return try await self.client.execute( + operation: "CreateFlow", + path: "/flows/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates an alias of a flow for deployment. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func createFlowAlias(_ input: CreateFlowAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFlowAliasResponse { + return try await self.client.execute( + operation: "CreateFlowAlias", + path: "/flows/{flowIdentifier}/aliases", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a version of the flow that you can deploy. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func createFlowVersion(_ input: CreateFlowVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFlowVersionResponse { + return try await self.client.execute( + operation: "CreateFlowVersion", + path: "/flows/{flowIdentifier}/versions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion. If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base. Provide the name and an optional description. Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field. Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object. Provide the configuration for your vector store in the storageConfiguration object. For an Amazon OpenSearch Service database, use the opensearchServerlessConfiguration object. For more information, see Create a vector store in Amazon OpenSearch Service. 
For an Amazon Aurora database, use the RdsConfiguration object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone database, use the pineconeConfiguration object. For more information, see Create a vector store in Pinecone. For a Redis Enterprise Cloud database, use the redisEnterpriseCloudConfiguration object. For more information, see Create a vector store in Redis Enterprise Cloud. @Sendable public func createKnowledgeBase(_ input: CreateKnowledgeBaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateKnowledgeBaseResponse { @@ -152,6 +191,32 @@ public struct BedrockAgent: AWSService { ) } + /// Creates a prompt in your prompt library that you can add to a flow. For more information, see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt flows in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func createPrompt(_ input: CreatePromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePromptResponse { + return try await self.client.execute( + operation: "CreatePrompt", + path: "/prompts/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a static snapshot of your prompt that can be deployed to production. For more information, see Deploy prompts using Prompt management by creating versions in the Amazon Bedrock User Guide. + @Sendable + public func createPromptVersion(_ input: CreatePromptVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePromptVersionResponse { + return try await self.client.execute( + operation: "CreatePromptVersion", + path: "/prompts/{promptIdentifier}/versions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an agent. @Sendable public func deleteAgent(_ input: DeleteAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAgentResponse { @@ -217,6 +282,45 @@ public struct BedrockAgent: AWSService { ) } + /// Deletes a flow. + @Sendable + public func deleteFlow(_ input: DeleteFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFlowResponse { + return try await self.client.execute( + operation: "DeleteFlow", + path: "/flows/{flowIdentifier}/", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an alias of a flow. + @Sendable + public func deleteFlowAlias(_ input: DeleteFlowAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFlowAliasResponse { + return try await self.client.execute( + operation: "DeleteFlowAlias", + path: "/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a version of a flow. + @Sendable + public func deleteFlowVersion(_ input: DeleteFlowVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFlowVersionResponse { + return try await self.client.execute( + operation: "DeleteFlowVersion", + path: "/flows/{flowIdentifier}/versions/{flowVersion}/", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the knowledge base from any agents that it is associated with by making a DisassociateAgentKnowledgeBase request. 
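Following the deleteKnowledgeBase note above (disassociate the knowledge base from its agents before deleting it), here is a rough sketch of the two-step teardown. The request member labels below (agentId, agentVersion, knowledgeBaseId) are assumptions, since those shapes are unchanged by this diff and therefore not shown in it.

```swift
import SotoBedrockAgent  // assumed module name for the BedrockAgent service target

// Sketch only: the initializer labels on DisassociateAgentKnowledgeBaseRequest and
// DeleteKnowledgeBaseRequest are assumed, as those shapes do not appear in this diff.
func tearDownKnowledgeBase(
    _ bedrockAgent: BedrockAgent,
    agentId: String,
    knowledgeBaseId: String
) async throws {
    // 1. Disassociate the knowledge base from the agent's working draft first.
    _ = try await bedrockAgent.disassociateAgentKnowledgeBase(
        .init(agentId: agentId, agentVersion: "DRAFT", knowledgeBaseId: knowledgeBaseId)
    )
    // 2. Then delete the knowledge base itself.
    _ = try await bedrockAgent.deleteKnowledgeBase(.init(knowledgeBaseId: knowledgeBaseId))
}
```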
@Sendable public func deleteKnowledgeBase(_ input: DeleteKnowledgeBaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteKnowledgeBaseResponse { @@ -230,6 +334,19 @@ public struct BedrockAgent: AWSService { ) } + /// Deletes a prompt or a prompt version from the Prompt management tool. For more information, see Delete prompts from the Prompt management tool and Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User Guide. + @Sendable + public func deletePrompt(_ input: DeletePromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePromptResponse { + return try await self.client.execute( + operation: "DeletePrompt", + path: "/prompts/{promptIdentifier}/", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disassociates a knowledge base from an agent. @Sendable public func disassociateAgentKnowledgeBase(_ input: DisassociateAgentKnowledgeBaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateAgentKnowledgeBaseResponse { @@ -321,6 +438,45 @@ public struct BedrockAgent: AWSService { ) } + /// Retrieves information about a flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func getFlow(_ input: GetFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFlowResponse { + return try await self.client.execute( + operation: "GetFlow", + path: "/flows/{flowIdentifier}/", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves information about a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func getFlowAlias(_ input: GetFlowAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFlowAliasResponse { + return try await self.client.execute( + operation: "GetFlowAlias", + path: "/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves information about a version of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func getFlowVersion(_ input: GetFlowVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFlowVersionResponse { + return try await self.client.execute( + operation: "GetFlowVersion", + path: "/flows/{flowIdentifier}/versions/{flowVersion}/", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets information about a ingestion job, in which a data source is added to a knowledge base. @Sendable public func getIngestionJob(_ input: GetIngestionJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetIngestionJobResponse { @@ -347,6 +503,19 @@ public struct BedrockAgent: AWSService { ) } + /// Retrieves information about a prompt or a version of it. For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide. 
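The flow operations above (createFlow, getFlow, getFlowAlias, getFlowVersion) exchange graphs of nodes and connections; the FlowNodeConfiguration sum type that models each node's payload, and the AgentFlowNodeConfiguration it wraps, are added further down in this diff. A minimal sketch of building, validating, and encoding one such node; the ARN is a placeholder matching the pattern validated by AgentFlowNodeConfiguration, the module name is assumed, and the JSON round-trip assumes AWSEncodableShape refines Encodable, as it does in soto-core.

```swift
import Foundation
import SotoBedrockAgent  // assumed module name for the BedrockAgent service target

// Sketch only: an agent node for a flow, wrapping the alias ARN of an existing agent.
func encodeAgentNodeExample() throws {
    let agentNode = BedrockAgent.FlowNodeConfiguration.agent(
        .init(agentAliasArn: "arn:aws:bedrock:us-east-1:123456789012:agent-alias/AAAAAAAAAA/BBBBBBBBBB")
    )
    // Client-side check using the validate(name:) generated in this diff.
    try agentNode.validate(name: "example")
    // The sum type encodes as a single-key object, e.g. {"agent":{"agentAliasArn":"..."}},
    // assuming AWSEncodableShape refines Encodable as in soto-core.
    let json = try JSONEncoder().encode(agentNode)
    print(String(decoding: json, as: UTF8.self))
}
```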
+ @Sendable + public func getPrompt(_ input: GetPromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetPromptResponse { + return try await self.client.execute( + operation: "GetPrompt", + path: "/prompts/{promptIdentifier}/", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the action groups for an agent and information about each one. @Sendable public func listAgentActionGroups(_ input: ListAgentActionGroupsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAgentActionGroupsResponse { @@ -425,6 +594,45 @@ public struct BedrockAgent: AWSService { ) } + /// Returns a list of aliases for a flow. + @Sendable + public func listFlowAliases(_ input: ListFlowAliasesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFlowAliasesResponse { + return try await self.client.execute( + operation: "ListFlowAliases", + path: "/flows/{flowIdentifier}/aliases", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of information about each flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func listFlowVersions(_ input: ListFlowVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFlowVersionsResponse { + return try await self.client.execute( + operation: "ListFlowVersions", + path: "/flows/{flowIdentifier}/versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of flows and information about each flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func listFlows(_ input: ListFlowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFlowsResponse { + return try await self.client.execute( + operation: "ListFlows", + path: "/flows/", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the ingestion jobs for a data source and information about each of them. @Sendable public func listIngestionJobs(_ input: ListIngestionJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIngestionJobsResponse { @@ -451,6 +659,19 @@ public struct BedrockAgent: AWSService { ) } + /// Returns a list of prompts from the Prompt management tool and information about each prompt. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide. + @Sendable + public func listPrompts(_ input: ListPromptsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPromptsResponse { + return try await self.client.execute( + operation: "ListPrompts", + path: "/prompts/", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all the tags for the resource you specify. @Sendable public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { @@ -477,6 +698,19 @@ public struct BedrockAgent: AWSService { ) } + /// Prepares the DRAFT version of a flow so that it can be invoked. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. 
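The new list operations above are also surfaced through paginator helpers added later in this diff. A short sketch of walking every flow with listFlowsPaginator; the module name is assumed, and ListFlowsRequest's two members are taken from its usingPaginationToken(_:) extension below, while the response is printed whole rather than assuming its member names.

```swift
import SotoBedrockAgent  // assumed module name for the BedrockAgent service target

// Sketch only: pages through every flow in the account using the paginator helper
// introduced further down in this diff.
func printAllFlows(_ bedrockAgent: BedrockAgent) async throws {
    let request = BedrockAgent.ListFlowsRequest(maxResults: 25, nextToken: nil)
    for try await page in bedrockAgent.listFlowsPaginator(request) {
        // Each element is a ListFlowsResponse; dump it rather than assuming member names.
        print(page)
    }
}
```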
+ @Sendable + public func prepareFlow(_ input: PrepareFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PrepareFlowResponse { + return try await self.client.execute( + operation: "PrepareFlow", + path: "/flows/{flowIdentifier}/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Begins an ingestion job, in which a data source is added to a knowledge base. @Sendable public func startIngestionJob(_ input: StartIngestionJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartIngestionJobResponse { @@ -568,7 +802,7 @@ public struct BedrockAgent: AWSService { ) } - /// Updates configurations for a data source. You can't change the chunkingConfiguration after you create the data source. Specify the existing chunkingConfiguration. + /// Updates the configurations for a data source connector. You can't change the chunkingConfiguration after you create the data source connector. Specify the existing chunkingConfiguration. @Sendable public func updateDataSource(_ input: UpdateDataSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataSourceResponse { return try await self.client.execute( @@ -581,6 +815,32 @@ public struct BedrockAgent: AWSService { ) } + /// Modifies a flow. Include both fields that you want to keep and fields that you want to change. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func updateFlow(_ input: UpdateFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFlowResponse { + return try await self.client.execute( + operation: "UpdateFlow", + path: "/flows/{flowIdentifier}/", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Modifies the alias of a flow. Include both fields that you want to keep and ones that you want to change. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + @Sendable + public func updateFlowAlias(_ input: UpdateFlowAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFlowAliasResponse { + return try await self.client.execute( + operation: "UpdateFlowAlias", + path: "/flows/{flowIdentifier}/aliases/{aliasIdentifier}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the configuration of a knowledge base with the fields that you specify. Because all fields will be overwritten, you must include the same values for fields that you want to keep the same. You can change the following fields: name description roleArn You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must specify the same configurations as when you created the knowledge base. You can send a GetKnowledgeBase request and copy the same configurations. @Sendable public func updateKnowledgeBase(_ input: UpdateKnowledgeBaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateKnowledgeBaseResponse { @@ -593,6 +853,19 @@ public struct BedrockAgent: AWSService { logger: logger ) } + + /// Modifies a prompt in your prompt library. Include both fields that you want to keep and fields that you want to replace. For more information, see Prompt management in Amazon Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide. 
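prepareFlow (added above) is what moves the working draft of a flow into a state that can be invoked; the FlowStatus enum introduced later in this diff (NotPrepared, Preparing, Prepared, Failed) describes that transition. A rough sketch, with the flowIdentifier label on PrepareFlowRequest assumed because that shape is not shown in this diff.

```swift
import SotoBedrockAgent  // assumed module name for the BedrockAgent service target

// Sketch only: prepares the DRAFT version of a flow so that it can be invoked.
// The flowIdentifier label on PrepareFlowRequest is assumed (shape not shown in this diff).
func prepareDraftFlow(_ bedrockAgent: BedrockAgent, flowIdentifier: String) async throws {
    let response = try await bedrockAgent.prepareFlow(.init(flowIdentifier: flowIdentifier))
    // The response is expected to report one of the new FlowStatus values
    // (NotPrepared, Preparing, Prepared, Failed); dump it rather than assuming member names.
    print(response)
}
```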
+ @Sendable + public func updatePrompt(_ input: UpdatePromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdatePromptResponse { + return try await self.client.execute( + operation: "UpdatePrompt", + path: "/prompts/{promptIdentifier}/", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension BedrockAgent { @@ -722,6 +995,63 @@ extension BedrockAgent { ) } + /// Returns a list of aliases for a flow. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listFlowAliasesPaginator( + _ input: ListFlowAliasesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listFlowAliases, + inputKey: \ListFlowAliasesRequest.nextToken, + outputKey: \ListFlowAliasesResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of information about each flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listFlowVersionsPaginator( + _ input: ListFlowVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listFlowVersions, + inputKey: \ListFlowVersionsRequest.nextToken, + outputKey: \ListFlowVersionsResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of flows and information about each flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listFlowsPaginator( + _ input: ListFlowsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listFlows, + inputKey: \ListFlowsRequest.nextToken, + outputKey: \ListFlowsResponse.nextToken, + logger: logger + ) + } + /// Lists the ingestion jobs for a data source and information about each of them. /// Return PaginatorSequence for operation. /// @@ -759,6 +1089,25 @@ extension BedrockAgent { logger: logger ) } + + /// Returns a list of prompts from the Prompt management tool and information about each prompt. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listPromptsPaginator( + _ input: ListPromptsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listPrompts, + inputKey: \ListPromptsRequest.nextToken, + outputKey: \ListPromptsResponse.nextToken, + logger: logger + ) + } } extension BedrockAgent.ListAgentActionGroupsRequest: AWSPaginateToken { @@ -822,6 +1171,35 @@ extension BedrockAgent.ListDataSourcesRequest: AWSPaginateToken { } } +extension BedrockAgent.ListFlowAliasesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> BedrockAgent.ListFlowAliasesRequest { + return .init( + flowIdentifier: self.flowIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension BedrockAgent.ListFlowVersionsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> BedrockAgent.ListFlowVersionsRequest { + return .init( + flowIdentifier: self.flowIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension BedrockAgent.ListFlowsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> BedrockAgent.ListFlowsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension BedrockAgent.ListIngestionJobsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> BedrockAgent.ListIngestionJobsRequest { return .init( @@ -843,3 +1221,13 @@ extension BedrockAgent.ListKnowledgeBasesRequest: AWSPaginateToken { ) } } + +extension BedrockAgent.ListPromptsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> BedrockAgent.ListPromptsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + promptIdentifier: self.promptIdentifier + ) + } +} diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift index 92aa840f9f..2a445820c7 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift @@ -27,6 +27,7 @@ extension BedrockAgent { // MARK: Enums public enum ActionGroupSignature: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case amazonCodeinterpreter = "AMAZON.CodeInterpreter" case amazonUserinput = "AMAZON.UserInput" public var description: String { return self.rawValue } } @@ -60,7 +61,25 @@ extension BedrockAgent { public enum ChunkingStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case fixedSize = "FIXED_SIZE" + case hierarchical = "HIERARCHICAL" case none = "NONE" + case semantic = "SEMANTIC" + public var description: String { return self.rawValue } + } + + public enum ConfluenceAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case basic = "BASIC" + case oauth2ClientCredentials = "OAUTH2_CLIENT_CREDENTIALS" + public var description: String { return self.rawValue } + } + + public enum ConfluenceHostType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case saas = "SAAS" + public var description: String { return self.rawValue } + } + + public enum CrawlFilterConfigurationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case pattern = "PATTERN" public var description: String { return self.rawValue } } @@ -89,7
+108,56 @@ extension BedrockAgent { } public enum DataSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case confluence = "CONFLUENCE" case s3 = "S3" + case salesforce = "SALESFORCE" + case sharepoint = "SHAREPOINT" + case web = "WEB" + public var description: String { return self.rawValue } + } + + public enum FlowConnectionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case conditional = "Conditional" + case data = "Data" + public var description: String { return self.rawValue } + } + + public enum FlowNodeIODataType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case array = "Array" + case boolean = "Boolean" + case number = "Number" + case object = "Object" + case string = "String" + public var description: String { return self.rawValue } + } + + public enum FlowNodeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case agent = "Agent" + case collector = "Collector" + case condition = "Condition" + case input = "Input" + case iterator = "Iterator" + case knowledgeBase = "KnowledgeBase" + case lambdaFunction = "LambdaFunction" + case lex = "Lex" + case output = "Output" + case prompt = "Prompt" + case retrieval = "Retrieval" + case storage = "Storage" + public var description: String { return self.rawValue } + } + + public enum FlowStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "Failed" + case notPrepared = "NotPrepared" + case prepared = "Prepared" + case preparing = "Preparing" + public var description: String { return self.rawValue } + } + + public enum FlowValidationSeverity: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case error = "Error" + case warning = "Warning" public var description: String { return self.rawValue } } @@ -147,12 +215,27 @@ extension BedrockAgent { public var description: String { return self.rawValue } } + public enum MemoryType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case sessionSummary = "SESSION_SUMMARY" + public var description: String { return self.rawValue } + } + + public enum ParsingStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bedrockFoundationModel = "BEDROCK_FOUNDATION_MODEL" + public var description: String { return self.rawValue } + } + public enum PromptState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" public var description: String { return self.rawValue } } + public enum PromptTemplateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case text = "TEXT" + public var description: String { return self.rawValue } + } + public enum PromptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case knowledgeBaseResponseGeneration = "KNOWLEDGE_BASE_RESPONSE_GENERATION" case orchestration = "ORCHESTRATION" @@ -161,12 +244,38 @@ extension BedrockAgent { public var description: String { return self.rawValue } } + public enum SalesforceAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case oauth2ClientCredentials = "OAUTH2_CLIENT_CREDENTIALS" + public var description: String { return self.rawValue } + } + + public enum SharePointAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case oauth2ClientCredentials = 
"OAUTH2_CLIENT_CREDENTIALS" + public var description: String { return self.rawValue } + } + + public enum SharePointHostType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case online = "ONLINE" + public var description: String { return self.rawValue } + } + public enum SortOrder: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ascending = "ASCENDING" case descending = "DESCENDING" public var description: String { return self.rawValue } } + public enum StepType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case postChunking = "POST_CHUNKING" + public var description: String { return self.rawValue } + } + + public enum WebScopeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hostOnly = "HOST_ONLY" + case subdomains = "SUBDOMAINS" + public var description: String { return self.rawValue } + } + public enum `Type`: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case array = "array" case boolean = "boolean" @@ -277,6 +386,250 @@ extension BedrockAgent { } } + public enum FlowConnectionConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The configuration of a connection originating from a Condition node. + case conditional(FlowConditionalConnectionConfiguration) + /// The configuration of a connection originating from a node that isn't a Condition node. + case data(FlowDataConnectionConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .conditional: + let value = try container.decode(FlowConditionalConnectionConfiguration.self, forKey: .conditional) + self = .conditional(value) + case .data: + let value = try container.decode(FlowDataConnectionConfiguration.self, forKey: .data) + self = .data(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .conditional(let value): + try container.encode(value, forKey: .conditional) + case .data(let value): + try container.encode(value, forKey: .data) + } + } + + public func validate(name: String) throws { + switch self { + case .conditional(let value): + try value.validate(name: "\(name).conditional") + case .data(let value): + try value.validate(name: "\(name).data") + } + } + + private enum CodingKeys: String, CodingKey { + case conditional = "conditional" + case data = "data" + } + } + + public enum FlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response. + case agent(AgentFlowNodeConfiguration) + /// Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs. + case collector(CollectorFlowNodeConfiguration) + /// Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow. + case condition(ConditionFlowNodeConfiguration) + /// Contains configurations for an input flow node in your flow. The first node in the flow. 
inputs can't be specified for this node. + case input(InputFlowNodeConfiguration) + /// Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output. The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node. + case iterator(IteratorFlowNodeConfiguration) + /// Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response. + case knowledgeBase(KnowledgeBaseFlowNodeConfiguration) + /// Contains configurations for a Lambda function node in your flow. Invokes a Lambda function. + case lambdaFunction(LambdaFunctionFlowNodeConfiguration) + /// Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output. + case lex(LexFlowNodeConfiguration) + /// Contains configurations for an output flow node in your flow. The last node in the flow. outputs can't be specified for this node. + case output(OutputFlowNodeConfiguration) + /// Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node. + case prompt(PromptFlowNodeConfiguration) + /// Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output. + case retrieval(RetrievalFlowNodeConfiguration) + /// Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location.
+ case storage(StorageFlowNodeConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .agent: + let value = try container.decode(AgentFlowNodeConfiguration.self, forKey: .agent) + self = .agent(value) + case .collector: + let value = try container.decode(CollectorFlowNodeConfiguration.self, forKey: .collector) + self = .collector(value) + case .condition: + let value = try container.decode(ConditionFlowNodeConfiguration.self, forKey: .condition) + self = .condition(value) + case .input: + let value = try container.decode(InputFlowNodeConfiguration.self, forKey: .input) + self = .input(value) + case .iterator: + let value = try container.decode(IteratorFlowNodeConfiguration.self, forKey: .iterator) + self = .iterator(value) + case .knowledgeBase: + let value = try container.decode(KnowledgeBaseFlowNodeConfiguration.self, forKey: .knowledgeBase) + self = .knowledgeBase(value) + case .lambdaFunction: + let value = try container.decode(LambdaFunctionFlowNodeConfiguration.self, forKey: .lambdaFunction) + self = .lambdaFunction(value) + case .lex: + let value = try container.decode(LexFlowNodeConfiguration.self, forKey: .lex) + self = .lex(value) + case .output: + let value = try container.decode(OutputFlowNodeConfiguration.self, forKey: .output) + self = .output(value) + case .prompt: + let value = try container.decode(PromptFlowNodeConfiguration.self, forKey: .prompt) + self = .prompt(value) + case .retrieval: + let value = try container.decode(RetrievalFlowNodeConfiguration.self, forKey: .retrieval) + self = .retrieval(value) + case .storage: + let value = try container.decode(StorageFlowNodeConfiguration.self, forKey: .storage) + self = .storage(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .agent(let value): + try container.encode(value, forKey: .agent) + case .collector(let value): + try container.encode(value, forKey: .collector) + case .condition(let value): + try container.encode(value, forKey: .condition) + case .input(let value): + try container.encode(value, forKey: .input) + case .iterator(let value): + try container.encode(value, forKey: .iterator) + case .knowledgeBase(let value): + try container.encode(value, forKey: .knowledgeBase) + case .lambdaFunction(let value): + try container.encode(value, forKey: .lambdaFunction) + case .lex(let value): + try container.encode(value, forKey: .lex) + case .output(let value): + try container.encode(value, forKey: .output) + case .prompt(let value): + try container.encode(value, forKey: .prompt) + case .retrieval(let value): + try container.encode(value, forKey: .retrieval) + case .storage(let value): + try container.encode(value, forKey: .storage) + } + } + + public func validate(name: String) throws { + switch self { + case .agent(let value): + try value.validate(name: "\(name).agent") + case .condition(let value): + try value.validate(name: "\(name).condition") + case .knowledgeBase(let value): + try value.validate(name: "\(name).knowledgeBase") + case .lambdaFunction(let value): + try value.validate(name: "\(name).lambdaFunction") + case .lex(let value): + 
try value.validate(name: "\(name).lex") + case .prompt(let value): + try value.validate(name: "\(name).prompt") + case .retrieval(let value): + try value.validate(name: "\(name).retrieval") + case .storage(let value): + try value.validate(name: "\(name).storage") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case agent = "agent" + case collector = "collector" + case condition = "condition" + case input = "input" + case iterator = "iterator" + case knowledgeBase = "knowledgeBase" + case lambdaFunction = "lambdaFunction" + case lex = "lex" + case output = "output" + case prompt = "prompt" + case retrieval = "retrieval" + case storage = "storage" + } + } + + public enum PromptFlowNodeSourceConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Contains configurations for a prompt that is defined inline + case inline(PromptFlowNodeInlineConfiguration) + /// Contains configurations for a prompt from Prompt management. + case resource(PromptFlowNodeResourceConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .inline: + let value = try container.decode(PromptFlowNodeInlineConfiguration.self, forKey: .inline) + self = .inline(value) + case .resource: + let value = try container.decode(PromptFlowNodeResourceConfiguration.self, forKey: .resource) + self = .resource(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .inline(let value): + try container.encode(value, forKey: .inline) + case .resource(let value): + try container.encode(value, forKey: .resource) + } + } + + public func validate(name: String) throws { + switch self { + case .inline(let value): + try value.validate(name: "\(name).inline") + case .resource(let value): + try value.validate(name: "\(name).resource") + } + } + + private enum CodingKeys: String, CodingKey { + case inline = "inline" + case resource = "resource" + } + } + // MARK: Shapes public struct ActionGroupSummary: AWSDecodableShape { @@ -335,12 +688,14 @@ extension BedrockAgent { public let failureReasons: [String]? /// The foundation model used for orchestration by the agent. public let foundationModel: String? - /// The guardrails configuration assigned to the agent. + /// Details about the guardrail associated with the agent. public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int /// Instructions that tell the agent what it should do and how it should interact with users. public let instruction: String? + /// Contains memory configuration for the agent. + public let memoryConfiguration: MemoryConfiguration? /// The time at which the agent was last prepared. @OptionalCustomCoding public var preparedAt: Date? 
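// --- Illustrative usage sketch (not part of the generated diff) --------------
// The flow node union above (assumed to be BedrockAgent.FlowNodeConfiguration),
// like PromptFlowNodeSourceConfiguration, encodes as a single-key JSON object,
// which is exactly what the hand-written init(from:) enforces with its
// "expected exactly one key" check. A minimal sketch of the wire shape; Soto
// performs this encoding internally when building a request, JSONEncoder is
// used here only to make the output visible.
import Foundation
import SotoBedrockAgent

let node = BedrockAgent.FlowNodeConfiguration.agent(
    .init(agentAliasArn: "arn:aws:bedrock:us-east-1:123456789012:agent-alias/ABCDEFGHIJ/KLMNOPQRST")
)
do {
    let data = try JSONEncoder().encode(node)
    // Prints a single-key object: {"agent":{"agentAliasArn":"arn:aws:bedrock:..."}}
    print(String(decoding: data, as: UTF8.self))
} catch {
    print("encoding failed: \(error)")
}
// ------------------------------------------------------------------------------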
@@ -352,7 +707,7 @@ extension BedrockAgent { @CustomCoding public var updatedAt: Date - public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, agentVersion: String, clientToken: String? = nil, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, preparedAt: Date? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date) { + public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, agentVersion: String, clientToken: String? = nil, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, memoryConfiguration: MemoryConfiguration? = nil, preparedAt: Date? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date) { self.agentArn = agentArn self.agentId = agentId self.agentName = agentName @@ -368,6 +723,7 @@ extension BedrockAgent { self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction + self.memoryConfiguration = memoryConfiguration self.preparedAt = preparedAt self.promptOverrideConfiguration = promptOverrideConfiguration self.recommendedActions = recommendedActions @@ -390,6 +746,7 @@ extension BedrockAgent { case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" + case memoryConfiguration = "memoryConfiguration" case preparedAt = "preparedAt" case promptOverrideConfiguration = "promptOverrideConfiguration" case recommendedActions = "recommendedActions" @@ -607,6 +964,24 @@ extension BedrockAgent { } } + public struct AgentFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the alias of the agent to invoke. + public let agentAliasArn: String + + public init(agentAliasArn: String) { + self.agentAliasArn = agentAliasArn + } + + public func validate(name: String) throws { + try self.validate(self.agentAliasArn, name: "agentAliasArn", parent: name, max: 2048) + try self.validate(self.agentAliasArn, name: "agentAliasArn", parent: name, pattern: "^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:agent-alias/[0-9a-zA-Z]{10}/[0-9a-zA-Z]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case agentAliasArn = "agentAliasArn" + } + } + public struct AgentKnowledgeBase: AWSDecodableShape { /// The unique identifier of the agent with which the knowledge base is associated. public let agentId: String @@ -681,7 +1056,7 @@ extension BedrockAgent { public let agentStatus: AgentStatus /// The description of the agent. public let description: String? - /// The details of the guardrails configuration in the agent summary. + /// Details about the guardrail associated with the agent. public let guardrailConfiguration: GuardrailConfiguration? /// The latest version of the agent. public let latestAgentVersion: String? @@ -732,12 +1107,14 @@ extension BedrockAgent { public let failureReasons: [String]? 
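// --- Illustrative usage sketch (not part of the generated diff) --------------
// AgentFlowNodeConfiguration.validate(name:) above enforces the agent-alias ARN
// length and pattern on the client before any request is sent. A minimal sketch,
// assuming the generated SotoBedrockAgent module; the ARN is a placeholder that
// matches the documented pattern.
import SotoBedrockAgent

let agentNode = BedrockAgent.AgentFlowNodeConfiguration(
    agentAliasArn: "arn:aws:bedrock:us-west-2:123456789012:agent-alias/ABCDEFGHIJ/KLMNOPQRST"
)
do {
    try agentNode.validate(name: "agentNode")   // passes: ARN matches the pattern above
} catch {
    // A malformed ARN (wrong partition, wrong ID length, and so on) is rejected
    // here instead of costing a round trip to the service.
    print("validation failed: \(error)")
}
// ------------------------------------------------------------------------------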
/// The foundation model that the version invokes. public let foundationModel: String? - /// The guardrails configuration assigned to the agent version. + /// Details about the guardrail associated with the agent. public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int /// The instructions provided to the agent. public let instruction: String? + /// Contains details of the memory configuration on the version of the agent. + public let memoryConfiguration: MemoryConfiguration? /// Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see Advanced prompts. public let promptOverrideConfiguration: PromptOverrideConfiguration? /// A list of recommended actions to take for the failed API operation on the version to succeed. @@ -748,7 +1125,7 @@ extension BedrockAgent { /// The version number. public let version: String - public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date, version: String) { + public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, memoryConfiguration: MemoryConfiguration? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date, version: String) { self.agentArn = agentArn self.agentId = agentId self.agentName = agentName @@ -762,6 +1139,7 @@ extension BedrockAgent { self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction + self.memoryConfiguration = memoryConfiguration self.promptOverrideConfiguration = promptOverrideConfiguration self.recommendedActions = recommendedActions self.updatedAt = updatedAt @@ -782,6 +1160,7 @@ extension BedrockAgent { case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" + case memoryConfiguration = "memoryConfiguration" case promptOverrideConfiguration = "promptOverrideConfiguration" case recommendedActions = "recommendedActions" case updatedAt = "updatedAt" @@ -801,7 +1180,7 @@ extension BedrockAgent { public var createdAt: Date /// The description of the version of the agent. public let description: String? - /// The details of the guardrails configuration in the agent version summary. + /// Details about the guardrail associated with the agent. public let guardrailConfiguration: GuardrailConfiguration? 
/// The time at which the version was last updated. @CustomCoding @@ -906,20 +1285,171 @@ extension BedrockAgent { } } + public struct BedrockFoundationModelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The model's ARN. + public let modelArn: String + /// Instructions for interpreting the contents of a document. + public let parsingPrompt: ParsingPrompt? + + public init(modelArn: String, parsingPrompt: ParsingPrompt? = nil) { + self.modelArn = modelArn + self.parsingPrompt = parsingPrompt + } + + public func validate(name: String) throws { + try self.validate(self.modelArn, name: "modelArn", parent: name, max: 2048) + try self.validate(self.modelArn, name: "modelArn", parent: name, min: 1) + try self.validate(self.modelArn, name: "modelArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})$") + try self.parsingPrompt?.validate(name: "\(name).parsingPrompt") + } + + private enum CodingKeys: String, CodingKey { + case modelArn = "modelArn" + case parsingPrompt = "parsingPrompt" + } + } + public struct ChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk. FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration. NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files. + /// Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk. FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration. HIERARCHICAL – Split documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer. SEMANTIC – Split documents into chunks based on groups of similar content derived with natural language processing. NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files. public let chunkingStrategy: ChunkingStrategy /// Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field. public let fixedSizeChunkingConfiguration: FixedSizeChunkingConfiguration? + /// Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer. + public let hierarchicalChunkingConfiguration: HierarchicalChunkingConfiguration? + /// Settings for semantic document chunking for a data source. 
Semantic chunking splits a document into into smaller documents based on groups of similar content derived from the text with natural language processing. + public let semanticChunkingConfiguration: SemanticChunkingConfiguration? - public init(chunkingStrategy: ChunkingStrategy, fixedSizeChunkingConfiguration: FixedSizeChunkingConfiguration? = nil) { + public init(chunkingStrategy: ChunkingStrategy, fixedSizeChunkingConfiguration: FixedSizeChunkingConfiguration? = nil, hierarchicalChunkingConfiguration: HierarchicalChunkingConfiguration? = nil, semanticChunkingConfiguration: SemanticChunkingConfiguration? = nil) { self.chunkingStrategy = chunkingStrategy self.fixedSizeChunkingConfiguration = fixedSizeChunkingConfiguration + self.hierarchicalChunkingConfiguration = hierarchicalChunkingConfiguration + self.semanticChunkingConfiguration = semanticChunkingConfiguration + } + + public func validate(name: String) throws { + try self.hierarchicalChunkingConfiguration?.validate(name: "\(name).hierarchicalChunkingConfiguration") } private enum CodingKeys: String, CodingKey { case chunkingStrategy = "chunkingStrategy" case fixedSizeChunkingConfiguration = "fixedSizeChunkingConfiguration" + case hierarchicalChunkingConfiguration = "hierarchicalChunkingConfiguration" + case semanticChunkingConfiguration = "semanticChunkingConfiguration" + } + } + + public struct CollectorFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + + public struct ConditionFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// An array of conditions. Each member contains the name of a condition and an expression that defines the condition. + public let conditions: [FlowCondition] + + public init(conditions: [FlowCondition]) { + self.conditions = conditions + } + + public func validate(name: String) throws { + try self.conditions.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, max: 5) + try self.validate(self.conditions, name: "conditions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case conditions = "conditions" + } + } + + public struct ConfluenceCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of filtering the Confluence content. For example, configuring regular expression patterns to include or exclude certain content. + public let filterConfiguration: CrawlFilterConfiguration? + + public init(filterConfiguration: CrawlFilterConfiguration? = nil) { + self.filterConfiguration = filterConfiguration + } + + public func validate(name: String) throws { + try self.filterConfiguration?.validate(name: "\(name).filterConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case filterConfiguration = "filterConfiguration" + } + } + + public struct ConfluenceDataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of the Confluence content. For example, configuring specific types of Confluence content. + public let crawlerConfiguration: ConfluenceCrawlerConfiguration? + /// The endpoint information to connect to your Confluence data source. + public let sourceConfiguration: ConfluenceSourceConfiguration + + public init(crawlerConfiguration: ConfluenceCrawlerConfiguration? 
= nil, sourceConfiguration: ConfluenceSourceConfiguration) { + self.crawlerConfiguration = crawlerConfiguration + self.sourceConfiguration = sourceConfiguration + } + + public func validate(name: String) throws { + try self.crawlerConfiguration?.validate(name: "\(name).crawlerConfiguration") + try self.sourceConfiguration.validate(name: "\(name).sourceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case crawlerConfiguration = "crawlerConfiguration" + case sourceConfiguration = "sourceConfiguration" + } + } + + public struct ConfluenceSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The supported authentication type to authenticate and connect to your Confluence instance. + public let authType: ConfluenceAuthType + /// The Amazon Resource Name of an Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see Confluence connection configuration. + public let credentialsSecretArn: String + /// The supported host type, whether online/cloud or server/on-premises. + public let hostType: ConfluenceHostType + /// The Confluence host URL or instance URL. + public let hostUrl: String + + public init(authType: ConfluenceAuthType, credentialsSecretArn: String, hostType: ConfluenceHostType, hostUrl: String) { + self.authType = authType + self.credentialsSecretArn = credentialsSecretArn + self.hostType = hostType + self.hostUrl = hostUrl + } + + public func validate(name: String) throws { + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$") + try self.validate(self.hostUrl, name: "hostUrl", parent: name, pattern: "^https://[A-Za-z0-9][^\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case authType = "authType" + case credentialsSecretArn = "credentialsSecretArn" + case hostType = "hostType" + case hostUrl = "hostUrl" + } + } + + public struct CrawlFilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of filtering certain objects or content types of the data source. + public let patternObjectFilter: PatternObjectFilterConfiguration? + /// The type of filtering that you want to apply to certain objects or content of the data source. For example, the PATTERN type is regular expression patterns you can apply to filter your content. + public let type: CrawlFilterConfigurationType + + public init(patternObjectFilter: PatternObjectFilterConfiguration? = nil, type: CrawlFilterConfigurationType) { + self.patternObjectFilter = patternObjectFilter + self.type = type + } + + public func validate(name: String) throws { + try self.patternObjectFilter?.validate(name: "\(name).patternObjectFilter") + } + + private enum CodingKeys: String, CodingKey { + case patternObjectFilter = "patternObjectFilter" + case type = "type" } } @@ -942,7 +1472,7 @@ extension BedrockAgent { public let description: String? /// Contains details about the function schema for the action group or the JSON or YAML-formatted payload defining the schema. public let functionSchema: FunctionSchema? - /// To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. 
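// --- Illustrative usage sketch (not part of the generated diff) --------------
// A minimal sketch of wiring the new Confluence connector shapes above together,
// assuming the generated SotoBedrockAgent module. The .basic and .saas case names
// are assumptions based on the service's enum raw values ("BASIC", "SAAS"); the
// secret ARN and host URL are placeholders chosen to satisfy the validate()
// patterns shown above.
import SotoBedrockAgent

let confluenceSource = BedrockAgent.ConfluenceSourceConfiguration(
    authType: .basic,    // assumed case name for "BASIC"
    credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:confluence-credentials",
    hostType: .saas,     // assumed case name for "SAAS"
    hostUrl: "https://example.atlassian.net/wiki"
)
let confluenceDataSource = BedrockAgent.ConfluenceDataSourceConfiguration(
    crawlerConfiguration: nil,   // optionally a ConfluenceCrawlerConfiguration with a CrawlFilterConfiguration
    sourceConfiguration: confluenceSource
)
do {
    try confluenceDataSource.validate(name: "confluenceConfiguration")
} catch {
    print("validation failed: \(error)")
}
// ------------------------------------------------------------------------------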
During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information. + /// To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information. public let parentActionGroupSignature: ActionGroupSignature? public init(actionGroupExecutor: ActionGroupExecutor? = nil, actionGroupName: String, actionGroupState: ActionGroupState? = nil, agentId: String, agentVersion: String, apiSchema: APISchema? = nil, clientToken: String? = CreateAgentActionGroupRequest.idempotencyToken(), description: String? = nil, functionSchema: FunctionSchema? = nil, parentActionGroupSignature: ActionGroupSignature? = nil) { @@ -983,7 +1513,7 @@ extension BedrockAgent { try self.apiSchema?.validate(name: "\(name).apiSchema") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.functionSchema?.validate(name: "\(name).functionSchema") @@ -1053,7 +1583,7 @@ extension BedrockAgent { try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.routingConfiguration?.forEach { @@ -1110,12 +1640,14 @@ extension BedrockAgent { public let idleSessionTTLInSeconds: Int? /// Instructions that tell the agent what it should do and how it should interact with users. public let instruction: String? + /// Contains the details of the memory configured for the agent. + public let memoryConfiguration: MemoryConfiguration? /// Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. public let promptOverrideConfiguration: PromptOverrideConfiguration? /// Any tags that you want to attach to the agent. 
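// --- Illustrative usage sketch (not part of the generated diff) --------------
// The updated parentActionGroupSignature documentation above adds
// AMAZON.CodeInterpreter. A minimal sketch of a code-interpreter action group,
// assuming the generated SotoBedrockAgent module and its async API; the Swift
// case name for "AMAZON.CodeInterpreter" is an assumption, the agent ID is a
// placeholder, and description, apiSchema, and actionGroupExecutor are omitted
// as the documentation requires.
import SotoBedrockAgent

let actionGroupRequest = BedrockAgent.CreateAgentActionGroupRequest(
    actionGroupName: "code_interpreter",
    agentId: "ABCDEFGHIJ",    // placeholder 10-character agent ID
    agentVersion: "DRAFT",
    parentActionGroupSignature: .amazonCodeinterpreter   // assumed case name; raw value "AMAZON.CodeInterpreter"
)
// let response = try await bedrockAgent.createAgentActionGroup(actionGroupRequest)
// ------------------------------------------------------------------------------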
public let tags: [String: String]? - public init(agentName: String, agentResourceRoleArn: String? = nil, clientToken: String? = CreateAgentRequest.idempotencyToken(), customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, tags: [String: String]? = nil) { + public init(agentName: String, agentResourceRoleArn: String? = nil, clientToken: String? = CreateAgentRequest.idempotencyToken(), customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, memoryConfiguration: MemoryConfiguration? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, tags: [String: String]? = nil) { self.agentName = agentName self.agentResourceRoleArn = agentResourceRoleArn self.clientToken = clientToken @@ -1125,6 +1657,7 @@ extension BedrockAgent { self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction + self.memoryConfiguration = memoryConfiguration self.promptOverrideConfiguration = promptOverrideConfiguration self.tags = tags } @@ -1135,7 +1668,7 @@ extension BedrockAgent { try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") @@ -1149,6 +1682,7 @@ extension BedrockAgent { try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, min: 60) try self.validate(self.instruction, name: "instruction", parent: name, max: 4000) try self.validate(self.instruction, name: "instruction", parent: name, min: 40) + try self.memoryConfiguration?.validate(name: "\(name).memoryConfiguration") try self.promptOverrideConfiguration?.validate(name: "\(name).promptOverrideConfiguration") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) @@ -1169,6 +1703,7 @@ extension BedrockAgent { case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" + case memoryConfiguration = "memoryConfiguration" case promptOverrideConfiguration = "promptOverrideConfiguration" case tags = "tags" } @@ -1190,9 +1725,9 @@ extension BedrockAgent { public struct CreateDataSourceRequest: AWSEncodableShape { /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. 
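// --- Illustrative usage sketch (not part of the generated diff) --------------
// CreateAgentRequest above gains an optional memoryConfiguration parameter while
// keeping its existing client-side checks (for example, instruction must be at
// least 40 characters and idleSessionTTLInSeconds at least 60). A minimal sketch,
// assuming the generated SotoBedrockAgent module and async createAgent API; the
// role ARN and model identifier are placeholders.
import SotoBedrockAgent

let createAgentRequest = BedrockAgent.CreateAgentRequest(
    agentName: "support_agent",
    agentResourceRoleArn: "arn:aws:iam::123456789012:role/AmazonBedrockExecutionRoleForAgents_example",
    description: "Answers product support questions",
    foundationModel: "anthropic.claude-3-sonnet-20240229-v1:0",
    idleSessionTTLInSeconds: 600,
    instruction: "You are a support agent. Answer questions about the product clearly and concisely.",
    memoryConfiguration: nil   // supply a MemoryConfiguration here to enable agent memory
)
do {
    try createAgentRequest.validate(name: "createAgentRequest")   // runs the checks shown above
    // let response = try await bedrockAgent.createAgent(createAgentRequest)
} catch {
    print("request failed validation: \(error)")
}
// ------------------------------------------------------------------------------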
If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. public let clientToken: String? - /// The data deletion policy assigned to the data source. + /// The data deletion policy for the data source. You can set the data deletion policy to: DELETE: Deletes all underlying data belonging to the data source from the vector store upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted, only the underlying data. This flag is ignored if an Amazon Web Services account is deleted. RETAIN: Retains all underlying data in your vector store upon deletion of a knowledge base or data source resource. public let dataDeletionPolicy: DataDeletionPolicy? - /// Contains metadata about where the data source is stored. + /// The connection configuration for the data source. public let dataSourceConfiguration: DataSourceConfiguration /// A description of the data source. public let description: String? @@ -1232,13 +1767,14 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.dataSourceConfiguration.validate(name: "\(name).dataSourceConfiguration") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") try self.serverSideEncryptionConfiguration?.validate(name: "\(name).serverSideEncryptionConfiguration") + try self.vectorIngestionConfiguration?.validate(name: "\(name).vectorIngestionConfiguration") } private enum CodingKeys: String, CodingKey { @@ -1265,43 +1801,53 @@ extension BedrockAgent { } } - public struct CreateKnowledgeBaseRequest: AWSEncodableShape { + public struct CreateFlowAliasRequest: AWSEncodableShape { /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. public let clientToken: String? - /// A description of the knowledge base. + /// A description for the alias. public let description: String? - /// Contains details about the embeddings model used for the knowledge base. - public let knowledgeBaseConfiguration: KnowledgeBaseConfiguration - /// A name for the knowledge base. + /// The unique identifier of the flow for which to create an alias. + public let flowIdentifier: String + /// A name for the alias. public let name: String - /// The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base. - public let roleArn: String - /// Contains details about the configuration of the vector database used for the knowledge base. 
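// --- Illustrative usage sketch (not part of the generated diff) --------------
// The expanded dataDeletionPolicy documentation above distinguishes DELETE from
// RETAIN. A minimal sketch of a CreateDataSourceRequest that retains vector-store
// data when the data source is deleted, assuming the generated SotoBedrockAgent
// module; the bucket ARN and knowledge base ID are placeholders, and the S3
// connector shape is assumed to keep its existing bucketArn initializer.
import SotoBedrockAgent

let dataSourceRequest = BedrockAgent.CreateDataSourceRequest(
    dataDeletionPolicy: .retain,   // keep the underlying vectors if this data source is deleted
    dataSourceConfiguration: .init(
        s3Configuration: .init(bucketArn: "arn:aws:s3:::example-knowledge-base-bucket"),
        type: .s3
    ),
    knowledgeBaseId: "ABCDEFGHIJ",   // placeholder 10-character knowledge base ID
    name: "product_docs"
)
do {
    try dataSourceRequest.validate(name: "dataSourceRequest")
    // let response = try await bedrockAgent.createDataSource(dataSourceRequest)
} catch {
    print("request failed validation: \(error)")
}
// ------------------------------------------------------------------------------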
- public let storageConfiguration: StorageConfiguration - /// Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object. + /// Contains information about the version to which to map the alias. + public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + /// Any tags that you want to attach to the alias of the flow. For more information, see Tagging resources in Amazon Bedrock. public let tags: [String: String]? - public init(clientToken: String? = CreateKnowledgeBaseRequest.idempotencyToken(), description: String? = nil, knowledgeBaseConfiguration: KnowledgeBaseConfiguration, name: String, roleArn: String, storageConfiguration: StorageConfiguration, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateFlowAliasRequest.idempotencyToken(), description: String? = nil, flowIdentifier: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem], tags: [String: String]? = nil) { self.clientToken = clientToken self.description = description - self.knowledgeBaseConfiguration = knowledgeBaseConfiguration + self.flowIdentifier = flowIdentifier self.name = name - self.roleArn = roleArn - self.storageConfiguration = storageConfiguration + self.routingConfiguration = routingConfiguration self.tags = tags } + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + try container.encode(self.name, forKey: .name) + try container.encode(self.routingConfiguration, forKey: .routingConfiguration) + try container.encodeIfPresent(self.tags, forKey: .tags) + } + public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) - try self.knowledgeBaseConfiguration.validate(name: "\(name).knowledgeBaseConfiguration") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") - try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) - try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$") - try self.storageConfiguration.validate(name: "\(name).storageConfiguration") + try self.routingConfiguration.forEach { + try $0.validate(name: "\(name).routingConfiguration[]") + } + try self.validate(self.routingConfiguration, name: "routingConfiguration", parent: name, max: 1) + try self.validate(self.routingConfiguration, name: "routingConfiguration", parent: name, min: 1) try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try 
validate($0.key, name: "tags.key", parent: name, min: 1) @@ -1314,285 +1860,1149 @@ extension BedrockAgent { private enum CodingKeys: String, CodingKey { case clientToken = "clientToken" case description = "description" - case knowledgeBaseConfiguration = "knowledgeBaseConfiguration" case name = "name" - case roleArn = "roleArn" - case storageConfiguration = "storageConfiguration" + case routingConfiguration = "routingConfiguration" case tags = "tags" } } - public struct CreateKnowledgeBaseResponse: AWSDecodableShape { - /// Contains details about the knowledge base. - public let knowledgeBase: KnowledgeBase + public struct CreateFlowAliasResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the alias. + public let arn: String + /// The time at which the alias was created. + @CustomCoding + public var createdAt: Date + /// The description of the alias. + public let description: String? + /// The unique identifier of the flow that the alias belongs to. + public let flowId: String + /// The unique identifier of the alias. + public let id: String + /// The name of the alias. + public let name: String + /// Contains information about the version that the alias is mapped to. + public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + /// The time at which the alias of the flow was last updated. + @CustomCoding + public var updatedAt: Date - public init(knowledgeBase: KnowledgeBase) { - self.knowledgeBase = knowledgeBase + public init(arn: String, createdAt: Date, description: String? = nil, flowId: String, id: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem], updatedAt: Date) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.flowId = flowId + self.id = id + self.name = name + self.routingConfiguration = routingConfiguration + self.updatedAt = updatedAt } private enum CodingKeys: String, CodingKey { - case knowledgeBase = "knowledgeBase" + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case flowId = "flowId" + case id = "id" + case name = "name" + case routingConfiguration = "routingConfiguration" + case updatedAt = "updatedAt" } } - public struct DataSource: AWSDecodableShape { - /// The time at which the data source was created. + public struct CreateFlowRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientToken: String? + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the flow. + public let customerEncryptionKeyArn: String? + /// A definition of the nodes and connections between nodes in the flow. + public let definition: FlowDefinition? + /// A description for the flow. + public let description: String? + /// The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// A name for the flow. + public let name: String + /// Any tags that you want to attach to the flow. For more information, see Tagging resources in Amazon Bedrock. + public let tags: [String: String]? + + public init(clientToken: String? = CreateFlowRequest.idempotencyToken(), customerEncryptionKeyArn: String? 
= nil, definition: FlowDefinition? = nil, description: String? = nil, executionRoleArn: String, name: String, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition + self.description = description + self.executionRoleArn = executionRoleArn + self.name = name + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.definition?.validate(name: "\(name).definition") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 2048) + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?.+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" + case description = "description" + case executionRoleArn = "executionRoleArn" + case name = "name" + case tags = "tags" + } + } + + public struct CreateFlowResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. @CustomCoding public var createdAt: Date - /// The data deletion policy for a data source. - public let dataDeletionPolicy: DataDeletionPolicy? - /// Contains details about how the data source is stored. - public let dataSourceConfiguration: DataSourceConfiguration - /// The unique identifier of the data source. - public let dataSourceId: String - /// The description of the data source. + /// The Amazon Resource Name (ARN) of the KMS key that you encrypted the flow with. + public let customerEncryptionKeyArn: String? + /// A definition of the nodes and connections between nodes in the flow. + public let definition: FlowDefinition? + /// The description of the flow. public let description: String? - /// The detailed reasons on the failure to delete a data source. - public let failureReasons: [String]? - /// The unique identifier of the knowledge base to which the data source belongs. 
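// --- Illustrative usage sketch (not part of the generated diff) --------------
// CreateFlowRequest above requires only a name and an execution role; the node
// definition is optional at creation time. A minimal sketch, assuming the
// generated SotoBedrockAgent module and async createFlow API; the role ARN is a
// placeholder that matches the executionRoleArn pattern shown above.
import SotoBedrockAgent

let flowRequest = BedrockAgent.CreateFlowRequest(
    description: "Routes support questions to an agent",
    executionRoleArn: "arn:aws:iam::123456789012:role/service-role/AmazonBedrockFlowsRole_example",
    name: "support_flow"
)
do {
    try flowRequest.validate(name: "flowRequest")
    // let response = try await bedrockAgent.createFlow(flowRequest)
    // Per the CreateFlowResponse documentation above, response.status starts as
    // NotPrepared until the flow is prepared.
} catch {
    print("request failed validation: \(error)")
}
// ------------------------------------------------------------------------------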
- public let knowledgeBaseId: String - /// The name of the data source. + /// The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let id: String + /// The name of the flow. public let name: String - /// Contains details about the configuration of the server-side encryption. - public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? - /// The status of the data source. The following statuses are possible: Available – The data source has been created and is ready for ingestion into the knowledge base. Deleting – The data source is being deleted. - public let status: DataSourceStatus - /// The time at which the data source was last updated. + /// The status of the flow. When you submit this request, the status will be NotPrepared. If creation fails, the status becomes Failed. + public let status: FlowStatus + /// The time at which the flow was last updated. @CustomCoding public var updatedAt: Date - /// Contains details about how to ingest the documents in the data source. - public let vectorIngestionConfiguration: VectorIngestionConfiguration? + /// The version of the flow. When you create a flow, the version created is the DRAFT version. + public let version: String - public init(createdAt: Date, dataDeletionPolicy: DataDeletionPolicy? = nil, dataSourceConfiguration: DataSourceConfiguration, dataSourceId: String, description: String? = nil, failureReasons: [String]? = nil, knowledgeBaseId: String, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, status: DataSourceStatus, updatedAt: Date, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? 
= nil, executionRoleArn: String, id: String, name: String, status: FlowStatus, updatedAt: Date, version: String) { + self.arn = arn self.createdAt = createdAt - self.dataDeletionPolicy = dataDeletionPolicy - self.dataSourceConfiguration = dataSourceConfiguration - self.dataSourceId = dataSourceId + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition self.description = description - self.failureReasons = failureReasons - self.knowledgeBaseId = knowledgeBaseId + self.executionRoleArn = executionRoleArn + self.id = id self.name = name - self.serverSideEncryptionConfiguration = serverSideEncryptionConfiguration self.status = status self.updatedAt = updatedAt - self.vectorIngestionConfiguration = vectorIngestionConfiguration + self.version = version } private enum CodingKeys: String, CodingKey { + case arn = "arn" case createdAt = "createdAt" - case dataDeletionPolicy = "dataDeletionPolicy" - case dataSourceConfiguration = "dataSourceConfiguration" - case dataSourceId = "dataSourceId" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" case description = "description" - case failureReasons = "failureReasons" - case knowledgeBaseId = "knowledgeBaseId" + case executionRoleArn = "executionRoleArn" + case id = "id" case name = "name" - case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" case status = "status" case updatedAt = "updatedAt" - case vectorIngestionConfiguration = "vectorIngestionConfiguration" + case version = "version" } } - public struct DataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Contains details about the configuration of the S3 object containing the data source. - public let s3Configuration: S3DataSourceConfiguration? - /// The type of storage for the data source. - public let type: DataSourceType + public struct CreateFlowVersionRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientToken: String? + /// A description of the version of the flow. + public let description: String? + /// The unique identifier of the flow that you want to create a version of. + public let flowIdentifier: String - public init(s3Configuration: S3DataSourceConfiguration? = nil, type: DataSourceType) { - self.s3Configuration = s3Configuration - self.type = type + public init(clientToken: String? = CreateFlowVersionRequest.idempotencyToken(), description: String? = nil, flowIdentifier: String) { + self.clientToken = clientToken + self.description = description + self.flowIdentifier = flowIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") } public func validate(name: String) throws { - try self.s3Configuration?.validate(name: "\(name).s3Configuration") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") } private enum CodingKeys: String, CodingKey { - case s3Configuration = "s3Configuration" - case type = "type" + case clientToken = "clientToken" + case description = "description" } } - public struct DataSourceSummary: AWSDecodableShape { - /// The unique identifier of the data source. - public let dataSourceId: String - /// The description of the data source. + public struct CreateFlowVersionResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. + @CustomCoding + public var createdAt: Date + /// The KMS key that the flow is encrypted with. + public let customerEncryptionKeyArn: String? + /// A definition of the nodes and connections in the flow. + public let definition: FlowDefinition? + /// The description of the flow version. public let description: String? - /// The unique identifier of the knowledge base to which the data source belongs. - public let knowledgeBaseId: String - /// The name of the data source. + /// The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let id: String + /// The name of the flow version. public let name: String - /// The status of the data source. - public let status: DataSourceStatus - /// The time at which the data source was last updated. - @CustomCoding - public var updatedAt: Date + /// The status of the flow. + public let status: FlowStatus + /// The version of the flow that was created. Versions are numbered incrementally, starting from 1. + public let version: String - public init(dataSourceId: String, description: String? = nil, knowledgeBaseId: String, name: String, status: DataSourceStatus, updatedAt: Date) { - self.dataSourceId = dataSourceId + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? 
= nil, executionRoleArn: String, id: String, name: String, status: FlowStatus, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition self.description = description - self.knowledgeBaseId = knowledgeBaseId + self.executionRoleArn = executionRoleArn + self.id = id self.name = name self.status = status - self.updatedAt = updatedAt + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" + case description = "description" + case executionRoleArn = "executionRoleArn" + case id = "id" + case name = "name" + case status = "status" + case version = "version" + } + } + + public struct CreateKnowledgeBaseRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientToken: String? + /// A description of the knowledge base. + public let description: String? + /// Contains details about the embeddings model used for the knowledge base. + public let knowledgeBaseConfiguration: KnowledgeBaseConfiguration + /// A name for the knowledge base. + public let name: String + /// The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base. + public let roleArn: String + /// Contains details about the configuration of the vector database used for the knowledge base. + public let storageConfiguration: StorageConfiguration + /// Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object. + public let tags: [String: String]? + + public init(clientToken: String? = CreateKnowledgeBaseRequest.idempotencyToken(), description: String? = nil, knowledgeBaseConfiguration: KnowledgeBaseConfiguration, name: String, roleArn: String, storageConfiguration: StorageConfiguration, tags: [String: String]? 
= nil) { + self.clientToken = clientToken + self.description = description + self.knowledgeBaseConfiguration = knowledgeBaseConfiguration + self.name = name + self.roleArn = roleArn + self.storageConfiguration = storageConfiguration + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.knowledgeBaseConfiguration.validate(name: "\(name).knowledgeBaseConfiguration") + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$") + try self.storageConfiguration.validate(name: "\(name).storageConfiguration") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case knowledgeBaseConfiguration = "knowledgeBaseConfiguration" + case name = "name" + case roleArn = "roleArn" + case storageConfiguration = "storageConfiguration" + case tags = "tags" + } + } + + public struct CreateKnowledgeBaseResponse: AWSDecodableShape { + /// Contains details about the knowledge base. + public let knowledgeBase: KnowledgeBase + + public init(knowledgeBase: KnowledgeBase) { + self.knowledgeBase = knowledgeBase + } + + private enum CodingKeys: String, CodingKey { + case knowledgeBase = "knowledgeBase" + } + } + + public struct CreatePromptRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientToken: String? + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object. + public let defaultVariant: String? + /// A description for the prompt. + public let description: String? + /// A name for the prompt. + public let name: String + /// Any tags that you want to attach to the prompt. For more information, see Tagging resources in Amazon Bedrock. + public let tags: [String: String]? + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + + public init(clientToken: String? = CreatePromptRequest.idempotencyToken(), customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? 
= nil, name: String, tags: [String: String]? = nil, variants: [PromptVariant]? = nil) { + self.clientToken = clientToken + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.name = name + self.tags = tags + self.variants = variants + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.validate(self.defaultVariant, name: "defaultVariant", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + } + try self.variants?.forEach { + try $0.validate(name: "\(name).variants[]") + } + try self.validate(self.variants, name: "variants", parent: name, max: 3) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case name = "name" + case tags = "tags" + case variants = "variants" + } + } + + public struct CreatePromptResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the prompt. + public let arn: String + /// The time at which the prompt was created. + @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key that you encrypted the prompt with. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for your prompt. + public let defaultVariant: String? + /// The description of the prompt. + public let description: String? + /// The unique identifier of the prompt. + public let id: String + /// The name of the prompt. + public let name: String + /// The time at which the prompt was last updated. + @CustomCoding + public var updatedAt: Date + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + /// The version of the prompt. When you create a prompt, the version created is the DRAFT version. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? = nil, id: String, name: String, updatedAt: Date, variants: [PromptVariant]? 
= nil, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.id = id + self.name = name + self.updatedAt = updatedAt + self.variants = variants + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + case variants = "variants" + case version = "version" + } + } + + public struct CreatePromptVersionRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. + public let clientToken: String? + /// A description for the version of the prompt. + public let description: String? + /// The unique identifier of the prompt that you want to create a version of. + public let promptIdentifier: String + /// Any tags that you want to attach to the version of the prompt. For more information, see Tagging resources in Amazon Bedrock. + public let tags: [String: String]? + + public init(clientToken: String? = CreatePromptVersionRequest.idempotencyToken(), description: String? = nil, promptIdentifier: String, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.description = description + self.promptIdentifier = promptIdentifier + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.promptIdentifier, key: "promptIdentifier") + try container.encodeIfPresent(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.promptIdentifier, name: "promptIdentifier", parent: name, pattern: "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[a-zA-Z0-9\\s._:/=+@-]*$") + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case tags = "tags" + } + } + + public struct CreatePromptVersionResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the version of the prompt. + public let arn: String + /// The time at which the prompt was created. + @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the version of the prompt. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object. + public let defaultVariant: String? + /// A description for the prompt version. + public let description: String? + /// The unique identifier of the prompt. + public let id: String + /// The name of the prompt version. + public let name: String + /// The time at which the prompt was last updated. + @CustomCoding + public var updatedAt: Date + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + /// The version of the prompt that was created. Versions are numbered incrementally, starting from 1. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? = nil, id: String, name: String, updatedAt: Date, variants: [PromptVariant]? 
= nil, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.id = id + self.name = name + self.updatedAt = updatedAt + self.variants = variants + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + case variants = "variants" + case version = "version" + } + } + + public struct CustomTransformationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// An S3 bucket path for input and output objects. + public let intermediateStorage: IntermediateStorage + /// A Lambda function that processes documents. + public let transformations: [Transformation] + + public init(intermediateStorage: IntermediateStorage, transformations: [Transformation]) { + self.intermediateStorage = intermediateStorage + self.transformations = transformations + } + + public func validate(name: String) throws { + try self.intermediateStorage.validate(name: "\(name).intermediateStorage") + try self.transformations.forEach { + try $0.validate(name: "\(name).transformations[]") + } + try self.validate(self.transformations, name: "transformations", parent: name, max: 1) + try self.validate(self.transformations, name: "transformations", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case intermediateStorage = "intermediateStorage" + case transformations = "transformations" + } + } + + public struct DataSource: AWSDecodableShape { + /// The time at which the data source was created. + @CustomCoding + public var createdAt: Date + /// The data deletion policy for the data source. + public let dataDeletionPolicy: DataDeletionPolicy? + /// The connection configuration for the data source. + public let dataSourceConfiguration: DataSourceConfiguration + /// The unique identifier of the data source. + public let dataSourceId: String + /// The description of the data source. + public let description: String? + /// The detailed reasons on the failure to delete a data source. + public let failureReasons: [String]? + /// The unique identifier of the knowledge base to which the data source belongs. + public let knowledgeBaseId: String + /// The name of the data source. + public let name: String + /// Contains details about the configuration of the server-side encryption. + public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? + /// The status of the data source. The following statuses are possible: Available – The data source has been created and is ready for ingestion into the knowledge base. Deleting – The data source is being deleted. + public let status: DataSourceStatus + /// The time at which the data source was last updated. + @CustomCoding + public var updatedAt: Date + /// Contains details about how to ingest the documents in the data source. + public let vectorIngestionConfiguration: VectorIngestionConfiguration? + + public init(createdAt: Date, dataDeletionPolicy: DataDeletionPolicy? = nil, dataSourceConfiguration: DataSourceConfiguration, dataSourceId: String, description: String? = nil, failureReasons: [String]? = nil, knowledgeBaseId: String, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? 
= nil, status: DataSourceStatus, updatedAt: Date, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { + self.createdAt = createdAt + self.dataDeletionPolicy = dataDeletionPolicy + self.dataSourceConfiguration = dataSourceConfiguration + self.dataSourceId = dataSourceId + self.description = description + self.failureReasons = failureReasons + self.knowledgeBaseId = knowledgeBaseId + self.name = name + self.serverSideEncryptionConfiguration = serverSideEncryptionConfiguration + self.status = status + self.updatedAt = updatedAt + self.vectorIngestionConfiguration = vectorIngestionConfiguration + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataDeletionPolicy = "dataDeletionPolicy" + case dataSourceConfiguration = "dataSourceConfiguration" + case dataSourceId = "dataSourceId" + case description = "description" + case failureReasons = "failureReasons" + case knowledgeBaseId = "knowledgeBaseId" + case name = "name" + case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" + case status = "status" + case updatedAt = "updatedAt" + case vectorIngestionConfiguration = "vectorIngestionConfiguration" + } + } + + public struct DataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration information to connect to Confluence as your data source. Confluence data source connector is in preview release and is subject to change. + public let confluenceConfiguration: ConfluenceDataSourceConfiguration? + /// The configuration information to connect to Amazon S3 as your data source. + public let s3Configuration: S3DataSourceConfiguration? + /// The configuration information to connect to Salesforce as your data source. Salesforce data source connector is in preview release and is subject to change. + public let salesforceConfiguration: SalesforceDataSourceConfiguration? + /// The configuration information to connect to SharePoint as your data source. SharePoint data source connector is in preview release and is subject to change. + public let sharePointConfiguration: SharePointDataSourceConfiguration? + /// The type of data source. + public let type: DataSourceType + /// The configuration of web URLs to crawl for your data source. You should be authorized to crawl the URLs. Crawling web URLs as your data source is in preview release and is subject to change. + public let webConfiguration: WebDataSourceConfiguration? + + public init(confluenceConfiguration: ConfluenceDataSourceConfiguration? = nil, s3Configuration: S3DataSourceConfiguration? = nil, salesforceConfiguration: SalesforceDataSourceConfiguration? = nil, sharePointConfiguration: SharePointDataSourceConfiguration? = nil, type: DataSourceType, webConfiguration: WebDataSourceConfiguration? 
= nil) { + self.confluenceConfiguration = confluenceConfiguration + self.s3Configuration = s3Configuration + self.salesforceConfiguration = salesforceConfiguration + self.sharePointConfiguration = sharePointConfiguration + self.type = type + self.webConfiguration = webConfiguration + } + + public func validate(name: String) throws { + try self.confluenceConfiguration?.validate(name: "\(name).confluenceConfiguration") + try self.s3Configuration?.validate(name: "\(name).s3Configuration") + try self.salesforceConfiguration?.validate(name: "\(name).salesforceConfiguration") + try self.sharePointConfiguration?.validate(name: "\(name).sharePointConfiguration") + try self.webConfiguration?.validate(name: "\(name).webConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case confluenceConfiguration = "confluenceConfiguration" + case s3Configuration = "s3Configuration" + case salesforceConfiguration = "salesforceConfiguration" + case sharePointConfiguration = "sharePointConfiguration" + case type = "type" + case webConfiguration = "webConfiguration" + } + } + + public struct DataSourceSummary: AWSDecodableShape { + /// The unique identifier of the data source. + public let dataSourceId: String + /// The description of the data source. + public let description: String? + /// The unique identifier of the knowledge base to which the data source belongs. + public let knowledgeBaseId: String + /// The name of the data source. + public let name: String + /// The status of the data source. + public let status: DataSourceStatus + /// The time at which the data source was last updated. + @CustomCoding + public var updatedAt: Date + + public init(dataSourceId: String, description: String? = nil, knowledgeBaseId: String, name: String, status: DataSourceStatus, updatedAt: Date) { + self.dataSourceId = dataSourceId + self.description = description + self.knowledgeBaseId = knowledgeBaseId + self.name = name + self.status = status + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case dataSourceId = "dataSourceId" + case description = "description" + case knowledgeBaseId = "knowledgeBaseId" + case name = "name" + case status = "status" + case updatedAt = "updatedAt" + } + } + + public struct DeleteAgentActionGroupRequest: AWSEncodableShape { + /// The unique identifier of the action group to delete. + public let actionGroupId: String + /// The unique identifier of the agent that the action group belongs to. + public let agentId: String + /// The version of the agent that the action group belongs to. + public let agentVersion: String + /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. + public let skipResourceInUseCheck: Bool? + + public init(actionGroupId: String, agentId: String, agentVersion: String, skipResourceInUseCheck: Bool? = nil) { + self.actionGroupId = actionGroupId + self.agentId = agentId + self.agentVersion = agentVersion + self.skipResourceInUseCheck = skipResourceInUseCheck + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.actionGroupId, key: "actionGroupId") + request.encodePath(self.agentId, key: "agentId") + request.encodePath(self.agentVersion, key: "agentVersion") + request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + } + + public func validate(name: String) throws { + try self.validate(self.actionGroupId, name: "actionGroupId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.agentVersion, name: "agentVersion", parent: name, max: 5) + try self.validate(self.agentVersion, name: "agentVersion", parent: name, min: 5) + try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^DRAFT$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAgentActionGroupResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAgentAliasRequest: AWSEncodableShape { + /// The unique identifier of the alias to delete. + public let agentAliasId: String + /// The unique identifier of the agent that the alias belongs to. + public let agentId: String + + public init(agentAliasId: String, agentId: String) { + self.agentAliasId = agentAliasId + self.agentId = agentId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.agentAliasId, key: "agentAliasId") + request.encodePath(self.agentId, key: "agentId") + } + + public func validate(name: String) throws { + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, max: 10) + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, min: 10) + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, pattern: "^(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$") + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAgentAliasResponse: AWSDecodableShape { + /// The unique identifier of the alias that was deleted. + public let agentAliasId: String + /// The status of the alias. + public let agentAliasStatus: AgentAliasStatus + /// The unique identifier of the agent that the alias belongs to. + public let agentId: String + + public init(agentAliasId: String, agentAliasStatus: AgentAliasStatus, agentId: String) { + self.agentAliasId = agentAliasId + self.agentAliasStatus = agentAliasStatus + self.agentId = agentId + } + + private enum CodingKeys: String, CodingKey { + case agentAliasId = "agentAliasId" + case agentAliasStatus = "agentAliasStatus" + case agentId = "agentId" + } + } + + public struct DeleteAgentRequest: AWSEncodableShape { + /// The unique identifier of the agent to delete. + public let agentId: String + /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. + public let skipResourceInUseCheck: Bool? + + public init(agentId: String, skipResourceInUseCheck: Bool? = nil) { + self.agentId = agentId + self.skipResourceInUseCheck = skipResourceInUseCheck + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.agentId, key: "agentId") + request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + } + + public func validate(name: String) throws { + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAgentResponse: AWSDecodableShape { + /// The unique identifier of the agent that was deleted. + public let agentId: String + /// The status of the agent. + public let agentStatus: AgentStatus + + public init(agentId: String, agentStatus: AgentStatus) { + self.agentId = agentId + self.agentStatus = agentStatus + } + + private enum CodingKeys: String, CodingKey { + case agentId = "agentId" + case agentStatus = "agentStatus" + } + } + + public struct DeleteAgentVersionRequest: AWSEncodableShape { + /// The unique identifier of the agent that the version belongs to. + public let agentId: String + /// The version of the agent to delete. + public let agentVersion: String + /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. + public let skipResourceInUseCheck: Bool? + + public init(agentId: String, agentVersion: String, skipResourceInUseCheck: Bool? = nil) { + self.agentId = agentId + self.agentVersion = agentVersion + self.skipResourceInUseCheck = skipResourceInUseCheck + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.agentId, key: "agentId") + request.encodePath(self.agentVersion, key: "agentVersion") + request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + } + + public func validate(name: String) throws { + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^[0-9]{1,5}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAgentVersionResponse: AWSDecodableShape { + /// The unique identifier of the agent that the version belongs to. + public let agentId: String + /// The status of the agent version. + public let agentStatus: AgentStatus + /// The version that was deleted. + public let agentVersion: String + + public init(agentId: String, agentStatus: AgentStatus, agentVersion: String) { + self.agentId = agentId + self.agentStatus = agentStatus + self.agentVersion = agentVersion + } + + private enum CodingKeys: String, CodingKey { + case agentId = "agentId" + case agentStatus = "agentStatus" + case agentVersion = "agentVersion" + } + } + + public struct DeleteDataSourceRequest: AWSEncodableShape { + /// The unique identifier of the data source to delete. + public let dataSourceId: String + /// The unique identifier of the knowledge base from which to delete the data source. + public let knowledgeBaseId: String + + public init(dataSourceId: String, knowledgeBaseId: String) { + self.dataSourceId = dataSourceId + self.knowledgeBaseId = knowledgeBaseId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.dataSourceId, key: "dataSourceId") + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + } + + public func validate(name: String) throws { + try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteDataSourceResponse: AWSDecodableShape { + /// The unique identifier of the data source that was deleted. + public let dataSourceId: String + /// The unique identifier of the knowledge base to which the data source that was deleted belonged. + public let knowledgeBaseId: String + /// The status of the data source. + public let status: DataSourceStatus + + public init(dataSourceId: String, knowledgeBaseId: String, status: DataSourceStatus) { + self.dataSourceId = dataSourceId + self.knowledgeBaseId = knowledgeBaseId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case dataSourceId = "dataSourceId" + case knowledgeBaseId = "knowledgeBaseId" + case status = "status" + } + } + + public struct DeleteFlowAliasRequest: AWSEncodableShape { + /// The unique identifier of the alias to be deleted. + public let aliasIdentifier: String + /// The unique identifier of the flow that the alias belongs to. + public let flowIdentifier: String + + public init(aliasIdentifier: String, flowIdentifier: String) { + self.aliasIdentifier = aliasIdentifier + self.flowIdentifier = flowIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aliasIdentifier, key: "aliasIdentifier") + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.aliasIdentifier, name: "aliasIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(TSTALIASID|[0-9a-zA-Z]{10})$") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteFlowAliasResponse: AWSDecodableShape { + /// The unique identifier of the flow that the alias belongs to. + public let flowId: String + /// The unique identifier of the flow. + public let id: String + + public init(flowId: String, id: String) { + self.flowId = flowId + self.id = id + } + + private enum CodingKeys: String, CodingKey { + case flowId = "flowId" + case id = "id" + } + } + + public struct DeleteFlowRequest: AWSEncodableShape { + /// The unique identifier of the flow. + public let flowIdentifier: String + /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. + public let skipResourceInUseCheck: Bool? + + public init(flowIdentifier: String, skipResourceInUseCheck: Bool? = nil) { + self.flowIdentifier = flowIdentifier + self.skipResourceInUseCheck = skipResourceInUseCheck + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + } + + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteFlowResponse: AWSDecodableShape { + /// The unique identifier of the flow. + public let id: String + + public init(id: String) { + self.id = id } private enum CodingKeys: String, CodingKey { - case dataSourceId = "dataSourceId" - case description = "description" - case knowledgeBaseId = "knowledgeBaseId" - case name = "name" - case status = "status" - case updatedAt = "updatedAt" + case id = "id" } } - public struct DeleteAgentActionGroupRequest: AWSEncodableShape { - /// The unique identifier of the action group to delete. - public let actionGroupId: String - /// The unique identifier of the agent that the action group belongs to. - public let agentId: String - /// The version of the agent that the action group belongs to. - public let agentVersion: String + public struct DeleteFlowVersionRequest: AWSEncodableShape { + /// The unique identifier of the flow whose version that you want to delete + public let flowIdentifier: String + /// The version of the flow that you want to delete. + public let flowVersion: String /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. public let skipResourceInUseCheck: Bool? - public init(actionGroupId: String, agentId: String, agentVersion: String, skipResourceInUseCheck: Bool? = nil) { - self.actionGroupId = actionGroupId - self.agentId = agentId - self.agentVersion = agentVersion + public init(flowIdentifier: String, flowVersion: String, skipResourceInUseCheck: Bool? = nil) { + self.flowIdentifier = flowIdentifier + self.flowVersion = flowVersion self.skipResourceInUseCheck = skipResourceInUseCheck } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.actionGroupId, key: "actionGroupId") - request.encodePath(self.agentId, key: "agentId") - request.encodePath(self.agentVersion, key: "agentVersion") + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + request.encodePath(self.flowVersion, key: "flowVersion") request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") } public func validate(name: String) throws { - try self.validate(self.actionGroupId, name: "actionGroupId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") - try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") - try self.validate(self.agentVersion, name: "agentVersion", parent: name, max: 5) - try self.validate(self.agentVersion, name: "agentVersion", parent: name, min: 5) - try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^DRAFT$") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + try self.validate(self.flowVersion, name: "flowVersion", parent: name, pattern: "^[0-9]{1,5}$") } private enum CodingKeys: CodingKey {} } - public struct DeleteAgentActionGroupResponse: AWSDecodableShape { - public init() {} + public struct DeleteFlowVersionResponse: AWSDecodableShape { + /// The unique identifier of the flow. + public let id: String + /// The version of the flow being deleted. + public let version: String + + public init(id: String, version: String) { + self.id = id + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case version = "version" + } } - public struct DeleteAgentAliasRequest: AWSEncodableShape { - /// The unique identifier of the alias to delete. - public let agentAliasId: String - /// The unique identifier of the agent that the alias belongs to. - public let agentId: String + public struct DeleteKnowledgeBaseRequest: AWSEncodableShape { + /// The unique identifier of the knowledge base to delete. + public let knowledgeBaseId: String - public init(agentAliasId: String, agentId: String) { - self.agentAliasId = agentAliasId - self.agentId = agentId + public init(knowledgeBaseId: String) { + self.knowledgeBaseId = knowledgeBaseId } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.agentAliasId, key: "agentAliasId") - request.encodePath(self.agentId, key: "agentId") + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") } public func validate(name: String) throws { - try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, max: 10) - try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, min: 10) - try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, pattern: "^(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$") - try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") } private enum CodingKeys: CodingKey {} } - public struct DeleteAgentAliasResponse: AWSDecodableShape { - /// The unique identifier of the alias that was deleted. - public let agentAliasId: String - /// The status of the alias. 
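// Illustrative usage sketch: the delete-style requests above carry their identifiers in the
// URL path and skipResourceInUseCheck in the query string via the RequestEncodingContainer
// obtained from encoder.userInfo[.awsRequest], while validate(name:) applies the modelled
// constraints on the client side. The deleteAgentVersion(_:) method and the SotoBedrockAgent
// module name follow the usual Soto conventions and are assumptions here; only the
// request/response shapes themselves are defined in this hunk.
import SotoBedrockAgent

func deleteNumberedAgentVersion(_ bedrockAgent: BedrockAgent) async throws -> BedrockAgent.DeleteAgentVersionResponse {
    let request = BedrockAgent.DeleteAgentVersionRequest(
        agentId: "ABCDEFGH12",        // path parameter, must match ^[0-9a-zA-Z]{10}$
        agentVersion: "1",            // path parameter, numeric version ^[0-9]{1,5}$
        skipResourceInUseCheck: true  // query parameter; true deletes even if the version is in use
    )
    try request.validate(name: "DeleteAgentVersionRequest") // same constraints the shape declares above
    return try await bedrockAgent.deleteAgentVersion(request)
}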
- public let agentAliasStatus: AgentAliasStatus - /// The unique identifier of the agent that the alias belongs to. - public let agentId: String + public struct DeleteKnowledgeBaseResponse: AWSDecodableShape { + /// The unique identifier of the knowledge base that was deleted. + public let knowledgeBaseId: String + /// The status of the knowledge base and whether it has been successfully deleted. + public let status: KnowledgeBaseStatus - public init(agentAliasId: String, agentAliasStatus: AgentAliasStatus, agentId: String) { - self.agentAliasId = agentAliasId - self.agentAliasStatus = agentAliasStatus - self.agentId = agentId + public init(knowledgeBaseId: String, status: KnowledgeBaseStatus) { + self.knowledgeBaseId = knowledgeBaseId + self.status = status } private enum CodingKeys: String, CodingKey { - case agentAliasId = "agentAliasId" - case agentAliasStatus = "agentAliasStatus" - case agentId = "agentId" + case knowledgeBaseId = "knowledgeBaseId" + case status = "status" } } - public struct DeleteAgentRequest: AWSEncodableShape { - /// The unique identifier of the agent to delete. - public let agentId: String - /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. - public let skipResourceInUseCheck: Bool? + public struct DeletePromptRequest: AWSEncodableShape { + /// The unique identifier of the prompt. + public let promptIdentifier: String + /// The version of the prompt to delete. + public let promptVersion: String? - public init(agentId: String, skipResourceInUseCheck: Bool? = nil) { - self.agentId = agentId - self.skipResourceInUseCheck = skipResourceInUseCheck + public init(promptIdentifier: String, promptVersion: String? = nil) { + self.promptIdentifier = promptIdentifier + self.promptVersion = promptVersion } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.agentId, key: "agentId") - request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + request.encodePath(self.promptIdentifier, key: "promptIdentifier") + request.encodeQuery(self.promptVersion, key: "promptVersion") } public func validate(name: String) throws { - try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.promptIdentifier, name: "promptIdentifier", parent: name, pattern: "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$") + try self.validate(self.promptVersion, name: "promptVersion", parent: name, pattern: "^[0-9]{1,5}$") } private enum CodingKeys: CodingKey {} } - public struct DeleteAgentResponse: AWSDecodableShape { - /// The unique identifier of the agent that was deleted. - public let agentId: String - /// The status of the agent. - public let agentStatus: AgentStatus + public struct DeletePromptResponse: AWSDecodableShape { + /// The unique identifier of the prompt that was deleted. + public let id: String + /// The version of the prompt that was deleted. + public let version: String? - public init(agentId: String, agentStatus: AgentStatus) { - self.agentId = agentId - self.agentStatus = agentStatus + public init(id: String, version: String? 
= nil) { + self.id = id + self.version = version } private enum CodingKeys: String, CodingKey { - case agentId = "agentId" - case agentStatus = "agentStatus" + case id = "id" + case version = "version" } } - public struct DeleteAgentVersionRequest: AWSEncodableShape { - /// The unique identifier of the agent that the version belongs to. + public struct DisassociateAgentKnowledgeBaseRequest: AWSEncodableShape { + /// The unique identifier of the agent from which to disassociate the knowledge base. public let agentId: String - /// The version of the agent to delete. + /// The version of the agent from which to disassociate the knowledge base. public let agentVersion: String - /// By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use. - public let skipResourceInUseCheck: Bool? + /// The unique identifier of the knowledge base to disassociate. + public let knowledgeBaseId: String - public init(agentId: String, agentVersion: String, skipResourceInUseCheck: Bool? = nil) { + public init(agentId: String, agentVersion: String, knowledgeBaseId: String) { self.agentId = agentId self.agentVersion = agentVersion - self.skipResourceInUseCheck = skipResourceInUseCheck + self.knowledgeBaseId = knowledgeBaseId } public func encode(to encoder: Encoder) throws { @@ -1600,191 +3010,423 @@ extension BedrockAgent { _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.agentId, key: "agentId") request.encodePath(self.agentVersion, key: "agentVersion") - request.encodeQuery(self.skipResourceInUseCheck, key: "skipResourceInUseCheck") + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") } public func validate(name: String) throws { try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") - try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^[0-9]{1,5}$") + try self.validate(self.agentVersion, name: "agentVersion", parent: name, max: 5) + try self.validate(self.agentVersion, name: "agentVersion", parent: name, min: 5) + try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^DRAFT$") + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") } private enum CodingKeys: CodingKey {} } - public struct DeleteAgentVersionResponse: AWSDecodableShape { - /// The unique identifier of the agent that the version belongs to. - public let agentId: String - /// The status of the agent version. - public let agentStatus: AgentStatus - /// The version that was deleted. - public let agentVersion: String + public struct DisassociateAgentKnowledgeBaseResponse: AWSDecodableShape { + public init() {} + } - public init(agentId: String, agentStatus: AgentStatus, agentVersion: String) { - self.agentId = agentId - self.agentStatus = agentStatus - self.agentVersion = agentVersion + public struct EmbeddingModelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The vector configuration details on the Bedrock embeddings model. + public let bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? + + public init(bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? 
= nil) { + self.bedrockEmbeddingModelConfiguration = bedrockEmbeddingModelConfiguration + } + + public func validate(name: String) throws { + try self.bedrockEmbeddingModelConfiguration?.validate(name: "\(name).bedrockEmbeddingModelConfiguration") } private enum CodingKeys: String, CodingKey { - case agentId = "agentId" - case agentStatus = "agentStatus" - case agentVersion = "agentVersion" + case bedrockEmbeddingModelConfiguration = "bedrockEmbeddingModelConfiguration" } } - public struct DeleteDataSourceRequest: AWSEncodableShape { - /// The unique identifier of the data source to delete. - public let dataSourceId: String - /// The unique identifier of the knowledge base from which to delete the data source. - public let knowledgeBaseId: String + public struct FixedSizeChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The maximum number of tokens to include in a chunk. + public let maxTokens: Int + /// The percentage of overlap between adjacent chunks of a data source. + public let overlapPercentage: Int - public init(dataSourceId: String, knowledgeBaseId: String) { - self.dataSourceId = dataSourceId - self.knowledgeBaseId = knowledgeBaseId + public init(maxTokens: Int, overlapPercentage: Int) { + self.maxTokens = maxTokens + self.overlapPercentage = overlapPercentage } - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.dataSourceId, key: "dataSourceId") - request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + private enum CodingKeys: String, CodingKey { + case maxTokens = "maxTokens" + case overlapPercentage = "overlapPercentage" + } + } + + public struct FlowAliasRoutingConfigurationListItem: AWSEncodableShape & AWSDecodableShape { + /// The version that the alias maps to. + public let flowVersion: String? + + public init(flowVersion: String? = nil) { + self.flowVersion = flowVersion } public func validate(name: String) throws { - try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") - try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.flowVersion, name: "flowVersion", parent: name, max: 5) + try self.validate(self.flowVersion, name: "flowVersion", parent: name, min: 1) + try self.validate(self.flowVersion, name: "flowVersion", parent: name, pattern: "^(DRAFT|[0-9]{0,4}[1-9][0-9]{0,4})$") } - private enum CodingKeys: CodingKey {} + private enum CodingKeys: String, CodingKey { + case flowVersion = "flowVersion" + } } - public struct DeleteDataSourceResponse: AWSDecodableShape { - /// The unique identifier of the data source that was deleted. - public let dataSourceId: String - /// The unique identifier of the knowledge base to which the data source that was deleted belonged. - public let knowledgeBaseId: String - /// The status of the data source. - public let status: DataSourceStatus + public struct FlowAliasSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow alias. + public let arn: String + /// The time at which the alias was created. + @CustomCoding + public var createdAt: Date + /// A description of the alias. + public let description: String? + /// The unique identifier of the flow. + public let flowId: String + /// The unique identifier of the alias of the flow. + public let id: String + /// The name of the alias. 
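// Illustrative usage sketch: both shapes above are plain value types. A routing-configuration
// entry points a flow alias at either the DRAFT version or a numbered version, and its
// validate(name:) rejects anything else; no validate method appears for
// FixedSizeChunkingConfiguration in this hunk, so only its types are enforced client-side.
// The SotoBedrockAgent module name is an assumption.
import SotoBedrockAgent

func exampleConfigurations() throws {
    // Point an alias at the working draft of a flow ("DRAFT" or a version number up to 5 digits).
    let routing = BedrockAgent.FlowAliasRoutingConfigurationListItem(flowVersion: "DRAFT")
    try routing.validate(name: "routingConfiguration[]")

    // Fixed-size chunking: 300-token chunks with 20% overlap between neighbouring chunks.
    let chunking = BedrockAgent.FixedSizeChunkingConfiguration(maxTokens: 300, overlapPercentage: 20)
    _ = (routing, chunking)
}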
+ public let name: String + /// A list of configurations about the versions that the alias maps to. Currently, you can only specify one. + public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + /// The time at which the alias was last updated. + @CustomCoding + public var updatedAt: Date + + public init(arn: String, createdAt: Date, description: String? = nil, flowId: String, id: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem], updatedAt: Date) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.flowId = flowId + self.id = id + self.name = name + self.routingConfiguration = routingConfiguration + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case flowId = "flowId" + case id = "id" + case name = "name" + case routingConfiguration = "routingConfiguration" + case updatedAt = "updatedAt" + } + } + + public struct FlowCondition: AWSEncodableShape & AWSDecodableShape { + /// Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in Node types in prompt flows. + public let expression: String? + /// A name for the condition that you can reference. + public let name: String + + public init(expression: String? = nil, name: String) { + self.expression = expression + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.expression, name: "expression", parent: name, max: 64) + try self.validate(self.expression, name: "expression", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case expression = "expression" + case name = "name" + } + } + + public struct FlowConditionalConnectionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The condition that triggers this connection. For more information about how to write conditions, see the Condition node type in the Node types topic in the Amazon Bedrock User Guide. + public let condition: String + + public init(condition: String) { + self.condition = condition + } + + public func validate(name: String) throws { + try self.validate(self.condition, name: "condition", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case condition = "condition" + } + } + + public struct FlowConnection: AWSEncodableShape & AWSDecodableShape { + /// The configuration of the connection. + public let configuration: FlowConnectionConfiguration? + /// A name for the connection that you can reference. + public let name: String + /// The node that the connection starts at. + public let source: String + /// The node that the connection ends at. + public let target: String + /// Whether the source node that the connection begins from is a condition node (Conditional) or not (Data). + public let type: FlowConnectionType + + public init(configuration: FlowConnectionConfiguration? 
= nil, name: String, source: String, target: String, type: FlowConnectionType) { + self.configuration = configuration + self.name = name + self.source = source + self.target = target + self.type = type + } + + public func validate(name: String) throws { + try self.configuration?.validate(name: "\(name).configuration") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,100}$") + try self.validate(self.source, name: "source", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + try self.validate(self.target, name: "target", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case name = "name" + case source = "source" + case target = "target" + case type = "type" + } + } + + public struct FlowDataConnectionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The name of the output in the source node that the connection begins from. + public let sourceOutput: String + /// The name of the input in the target node that the connection ends at. + public let targetInput: String + + public init(sourceOutput: String, targetInput: String) { + self.sourceOutput = sourceOutput + self.targetInput = targetInput + } - public init(dataSourceId: String, knowledgeBaseId: String, status: DataSourceStatus) { - self.dataSourceId = dataSourceId - self.knowledgeBaseId = knowledgeBaseId - self.status = status + public func validate(name: String) throws { + try self.validate(self.sourceOutput, name: "sourceOutput", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + try self.validate(self.targetInput, name: "targetInput", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") } private enum CodingKeys: String, CodingKey { - case dataSourceId = "dataSourceId" - case knowledgeBaseId = "knowledgeBaseId" - case status = "status" + case sourceOutput = "sourceOutput" + case targetInput = "targetInput" } } - public struct DeleteKnowledgeBaseRequest: AWSEncodableShape { - /// The unique identifier of the knowledge base to delete. - public let knowledgeBaseId: String - - public init(knowledgeBaseId: String) { - self.knowledgeBaseId = knowledgeBaseId - } + public struct FlowDefinition: AWSEncodableShape & AWSDecodableShape { + /// An array of connection definitions in the flow. + public let connections: [FlowConnection]? + /// An array of node definitions in the flow. + public let nodes: [FlowNode]? - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + public init(connections: [FlowConnection]? = nil, nodes: [FlowNode]? 
= nil) { + self.connections = connections + self.nodes = nodes } public func validate(name: String) throws { - try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.connections?.forEach { + try $0.validate(name: "\(name).connections[]") + } + try self.validate(self.connections, name: "connections", parent: name, max: 20) + try self.nodes?.forEach { + try $0.validate(name: "\(name).nodes[]") + } + try self.validate(self.nodes, name: "nodes", parent: name, max: 20) } - private enum CodingKeys: CodingKey {} + private enum CodingKeys: String, CodingKey { + case connections = "connections" + case nodes = "nodes" + } } - public struct DeleteKnowledgeBaseResponse: AWSDecodableShape { - /// The unique identifier of the knowledge base that was deleted. - public let knowledgeBaseId: String - /// The status of the knowledge base and whether it has been successfully deleted. - public let status: KnowledgeBaseStatus + public struct FlowNode: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the node. + public let configuration: FlowNodeConfiguration? + /// An array of objects, each of which contains information about an input into the node. + public let inputs: [FlowNodeInput]? + /// A name for the node. + public let name: String + /// A list of objects, each of which contains information about an output from the node. + public let outputs: [FlowNodeOutput]? + /// The type of node. This value must match the name of the key that you provide in the configuration you provide in the FlowNodeConfiguration field. + public let type: FlowNodeType + + public init(configuration: FlowNodeConfiguration? = nil, inputs: [FlowNodeInput]? = nil, name: String, outputs: [FlowNodeOutput]? = nil, type: FlowNodeType) { + self.configuration = configuration + self.inputs = inputs + self.name = name + self.outputs = outputs + self.type = type + } - public init(knowledgeBaseId: String, status: KnowledgeBaseStatus) { - self.knowledgeBaseId = knowledgeBaseId - self.status = status + public func validate(name: String) throws { + try self.configuration?.validate(name: "\(name).configuration") + try self.inputs?.forEach { + try $0.validate(name: "\(name).inputs[]") + } + try self.validate(self.inputs, name: "inputs", parent: name, max: 5) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + try self.outputs?.forEach { + try $0.validate(name: "\(name).outputs[]") + } + try self.validate(self.outputs, name: "outputs", parent: name, max: 5) } private enum CodingKeys: String, CodingKey { - case knowledgeBaseId = "knowledgeBaseId" - case status = "status" + case configuration = "configuration" + case inputs = "inputs" + case name = "name" + case outputs = "outputs" + case type = "type" } } - public struct DisassociateAgentKnowledgeBaseRequest: AWSEncodableShape { - /// The unique identifier of the agent from which to disassociate the knowledge base. - public let agentId: String - /// The version of the agent from which to disassociate the knowledge base. - public let agentVersion: String - /// The unique identifier of the knowledge base to disassociate. - public let knowledgeBaseId: String + public struct FlowNodeInput: AWSEncodableShape & AWSDecodableShape { + /// An expression that formats the input for the node. For an explanation of how to create expressions, see Expressions in Prompt flows in Amazon Bedrock. + public let expression: String + /// A name for the input that you can reference. 
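// Illustrative usage sketch: a condition node names its branches with FlowCondition values,
// and a conditional connection refers back to one of those names through
// FlowConditionalConnectionConfiguration. Both sides are validated against the same
// ^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$ name pattern, and the expression is capped at 64
// characters and should reference at least one node input. The SotoBedrockAgent module name
// is an assumption; the shapes and their initializers come from this hunk.
import SotoBedrockAgent

func conditionBranch() throws -> BedrockAgent.FlowConditionalConnectionConfiguration {
    let condition = BedrockAgent.FlowCondition(
        expression: "score >= 0.8",  // at most 64 characters
        name: "highConfidence"       // referenced by the connection configuration below
    )
    try condition.validate(name: "conditions[]")

    let connection = BedrockAgent.FlowConditionalConnectionConfiguration(condition: condition.name)
    try connection.validate(name: "configuration.conditional")
    return connection
}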
+ public let name: String + /// The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown. + public let type: FlowNodeIODataType - public init(agentId: String, agentVersion: String, knowledgeBaseId: String) { - self.agentId = agentId - self.agentVersion = agentVersion - self.knowledgeBaseId = knowledgeBaseId + public init(expression: String, name: String, type: FlowNodeIODataType) { + self.expression = expression + self.name = name + self.type = type } - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.agentId, key: "agentId") - request.encodePath(self.agentVersion, key: "agentVersion") - request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + public func validate(name: String) throws { + try self.validate(self.expression, name: "expression", parent: name, max: 64) + try self.validate(self.expression, name: "expression", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case expression = "expression" + case name = "name" + case type = "type" + } + } + + public struct FlowNodeOutput: AWSEncodableShape & AWSDecodableShape { + /// A name for the output that you can reference. + public let name: String + /// The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown. + public let type: FlowNodeIODataType + + public init(name: String, type: FlowNodeIODataType) { + self.name = name + self.type = type } public func validate(name: String) throws { - try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") - try self.validate(self.agentVersion, name: "agentVersion", parent: name, max: 5) - try self.validate(self.agentVersion, name: "agentVersion", parent: name, min: 5) - try self.validate(self.agentVersion, name: "agentVersion", parent: name, pattern: "^DRAFT$") - try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$") } - private enum CodingKeys: CodingKey {} + private enum CodingKeys: String, CodingKey { + case name = "name" + case type = "type" + } } - public struct DisassociateAgentKnowledgeBaseResponse: AWSDecodableShape { - public init() {} - } + public struct FlowSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. + @CustomCoding + public var createdAt: Date + /// A description of the flow. + public let description: String? + /// The unique identifier of the flow. + public let id: String + /// The name of the flow. + public let name: String + /// The status of the flow. The following statuses are possible: NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version. Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing. Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing. 
Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field. + public let status: FlowStatus + /// The time at which the flow was last updated. + @CustomCoding + public var updatedAt: Date + /// The latest version of the flow. + public let version: String - public struct EmbeddingModelConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The vector configuration details on the Bedrock embeddings model. - public let bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? + public init(arn: String, createdAt: Date, description: String? = nil, id: String, name: String, status: FlowStatus, updatedAt: Date, version: String) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.id = id + self.name = name + self.status = status + self.updatedAt = updatedAt + self.version = version + } - public init(bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? = nil) { - self.bedrockEmbeddingModelConfiguration = bedrockEmbeddingModelConfiguration + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case id = "id" + case name = "name" + case status = "status" + case updatedAt = "updatedAt" + case version = "version" } + } - public func validate(name: String) throws { - try self.bedrockEmbeddingModelConfiguration?.validate(name: "\(name).bedrockEmbeddingModelConfiguration") + public struct FlowValidation: AWSDecodableShape { + /// A message describing the validation error. + public let message: String + /// The severity of the issue described in the message. + public let severity: FlowValidationSeverity + + public init(message: String, severity: FlowValidationSeverity) { + self.message = message + self.severity = severity } private enum CodingKeys: String, CodingKey { - case bedrockEmbeddingModelConfiguration = "bedrockEmbeddingModelConfiguration" + case message = "message" + case severity = "severity" } } - public struct FixedSizeChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The maximum number of tokens to include in a chunk. - public let maxTokens: Int - /// The percentage of overlap between adjacent chunks of a data source. - public let overlapPercentage: Int + public struct FlowVersionSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow that the version belongs to. + public let arn: String + /// The time at the flow version was created. + @CustomCoding + public var createdAt: Date + /// The unique identifier of the flow. + public let id: String + /// The status of the flow. + public let status: FlowStatus + /// The version of the flow. + public let version: String - public init(maxTokens: Int, overlapPercentage: Int) { - self.maxTokens = maxTokens - self.overlapPercentage = overlapPercentage + public init(arn: String, createdAt: Date, id: String, status: FlowStatus, version: String) { + self.arn = arn + self.createdAt = createdAt + self.id = id + self.status = status + self.version = version } private enum CodingKeys: String, CodingKey { - case maxTokens = "maxTokens" - case overlapPercentage = "overlapPercentage" + case arn = "arn" + case createdAt = "createdAt" + case id = "id" + case status = "status" + case version = "version" } } @@ -2064,6 +3706,231 @@ extension BedrockAgent { } } + public struct GetFlowAliasRequest: AWSEncodableShape { + /// The unique identifier of the alias for which to retrieve information. 
+ public let aliasIdentifier: String + /// The unique identifier of the flow that the alias belongs to. + public let flowIdentifier: String + + public init(aliasIdentifier: String, flowIdentifier: String) { + self.aliasIdentifier = aliasIdentifier + self.flowIdentifier = flowIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aliasIdentifier, key: "aliasIdentifier") + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.aliasIdentifier, name: "aliasIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(TSTALIASID|[0-9a-zA-Z]{10})$") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetFlowAliasResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. + @CustomCoding + public var createdAt: Date + /// The description of the flow. + public let description: String? + /// The unique identifier of the flow that the alias belongs to. + public let flowId: String + /// The unique identifier of the alias of the flow. + public let id: String + /// The name of the flow alias. + public let name: String + /// Contains information about the version that the alias is mapped to. + public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + /// The time at which the flow alias was last updated. + @CustomCoding + public var updatedAt: Date + + public init(arn: String, createdAt: Date, description: String? = nil, flowId: String, id: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem], updatedAt: Date) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.flowId = flowId + self.id = id + self.name = name + self.routingConfiguration = routingConfiguration + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case flowId = "flowId" + case id = "id" + case name = "name" + case routingConfiguration = "routingConfiguration" + case updatedAt = "updatedAt" + } + } + + public struct GetFlowRequest: AWSEncodableShape { + /// The unique identifier of the flow. + public let flowIdentifier: String + + public init(flowIdentifier: String) { + self.flowIdentifier = flowIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetFlowResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. 
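As a quick orientation for reviewers, the sketch below shows how the GetFlowRequest shape above could be used to surface the Failed status and the validation messages described in these doc comments. It is a minimal sketch only: it assumes the getFlow operation that Soto generates alongside these shapes in BedrockAgent_api.swift and the soto-core 7 AWSClient defaults, and "FLOW123456" is a placeholder identifier.

import SotoBedrockAgent

// Minimal sketch: fetch a flow and print any validation errors reported for it.
// The flow identifier is a placeholder; credentials and region resolution use AWSClient defaults.
func printFlowValidations() async throws {
    let client = AWSClient()
    let bedrockAgent = BedrockAgent(client: client, region: .useast1)
    do {
        let flow = try await bedrockAgent.getFlow(GetFlowRequest(flowIdentifier: "FLOW123456"))
        print("\(flow.name): \(flow.status)")
        for validation in flow.validations ?? [] {
            print("[\(validation.severity)] \(validation.message)")
        }
    } catch {
        try? await client.shutdown()
        throw error
    }
    try await client.shutdown()
}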
+ @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with. + public let customerEncryptionKeyArn: String? + /// The definition of the nodes and connections between the nodes in the flow. + public let definition: FlowDefinition? + /// The description of the flow. + public let description: String? + /// The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let id: String + /// The name of the flow. + public let name: String + /// The status of the flow. The following statuses are possible: NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version. Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing. Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing. Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field. + public let status: FlowStatus + /// The time at which the flow was last updated. + @CustomCoding + public var updatedAt: Date + /// A list of validation error messages related to the last failed operation on the flow. + public let validations: [FlowValidation]? + /// The version of the flow for which information was retrieved. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? = nil, executionRoleArn: String, id: String, name: String, status: FlowStatus, updatedAt: Date, validations: [FlowValidation]? = nil, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition + self.description = description + self.executionRoleArn = executionRoleArn + self.id = id + self.name = name + self.status = status + self.updatedAt = updatedAt + self.validations = validations + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" + case description = "description" + case executionRoleArn = "executionRoleArn" + case id = "id" + case name = "name" + case status = "status" + case updatedAt = "updatedAt" + case validations = "validations" + case version = "version" + } + } + + public struct GetFlowVersionRequest: AWSEncodableShape { + /// The unique identifier of the flow for which to get information. + public let flowIdentifier: String + /// The version of the flow for which to get information. + public let flowVersion: String + + public init(flowIdentifier: String, flowVersion: String) { + self.flowIdentifier = flowIdentifier + self.flowVersion = flowVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + request.encodePath(self.flowVersion, key: "flowVersion") + } + + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + try self.validate(self.flowVersion, name: "flowVersion", parent: name, pattern: "^[0-9]{1,5}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetFlowVersionResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. + @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key that the version of the flow is encrypted with. + public let customerEncryptionKeyArn: String? + /// The definition of the nodes and connections between nodes in the flow. + public let definition: FlowDefinition? + /// The description of the flow. + public let description: String? + /// The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let id: String + /// The name of the flow version. + public let name: String + /// The status of the flow. + public let status: FlowStatus + /// The version of the flow for which information was retrieved. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? = nil, executionRoleArn: String, id: String, name: String, status: FlowStatus, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition + self.description = description + self.executionRoleArn = executionRoleArn + self.id = id + self.name = name + self.status = status + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" + case description = "description" + case executionRoleArn = "executionRoleArn" + case id = "id" + case name = "name" + case status = "status" + case version = "version" + } + } + public struct GetIngestionJobRequest: AWSEncodableShape { /// The unique identifier of the data source in the ingestion job. public let dataSourceId: String @@ -2142,10 +4009,89 @@ extension BedrockAgent { } } + public struct GetPromptRequest: AWSEncodableShape { + /// The unique identifier of the prompt. + public let promptIdentifier: String + /// The version of the prompt about which you want to retrieve information. + public let promptVersion: String? + + public init(promptIdentifier: String, promptVersion: String? = nil) { + self.promptIdentifier = promptIdentifier + self.promptVersion = promptVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.promptIdentifier, key: "promptIdentifier") + request.encodeQuery(self.promptVersion, key: "promptVersion") + } + + public func validate(name: String) throws { + try self.validate(self.promptIdentifier, name: "promptIdentifier", parent: name, pattern: "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$") + try self.validate(self.promptVersion, name: "promptVersion", parent: name, max: 5) + try self.validate(self.promptVersion, name: "promptVersion", parent: name, min: 1) + try self.validate(self.promptVersion, name: "promptVersion", parent: name, pattern: "^(DRAFT|[0-9]{0,4}[1-9][0-9]{0,4})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetPromptResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the prompt. + public let arn: String + /// The time at which the prompt was created. + @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object. + public let defaultVariant: String? + /// The description of the prompt. + public let description: String? + /// The unique identifier of the prompt. + public let id: String + /// The name of the prompt. + public let name: String + /// The time at which the prompt was last updated. + @CustomCoding + public var updatedAt: Date + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + /// The version of the prompt. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? = nil, id: String, name: String, updatedAt: Date, variants: [PromptVariant]? = nil, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.id = id + self.name = name + self.updatedAt = updatedAt + self.variants = variants + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + case variants = "variants" + case version = "version" + } + } + public struct GuardrailConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The guardrails identifier assigned to the guardrails configuration. + /// The unique identifier of the guardrail. public let guardrailIdentifier: String? - /// The guardrails version assigned to the guardrails configuration. + /// The version of the guardrail. public let guardrailVersion: String? public init(guardrailIdentifier: String? = nil, guardrailVersion: String? = nil) { @@ -2165,6 +4111,41 @@ extension BedrockAgent { } } + public struct HierarchicalChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Token settings for each layer. + public let levelConfigurations: [HierarchicalChunkingLevelConfiguration] + /// The number of tokens to repeat across chunks in the same layer. 
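The client-side validate() methods on these request shapes can be exercised directly, which is a convenient way to review the regex patterns above. A small sketch, assuming placeholder values ("PROMPT1234" and version "1") chosen to satisfy the documented patterns:

import SotoBedrockAgent

// Sketch only: check a GetPromptRequest against the generated client-side constraints.
let request = GetPromptRequest(promptIdentifier: "PROMPT1234", promptVersion: "1")
do {
    try request.validate(name: "GetPromptRequest")
} catch {
    // Thrown before any network call if the identifier or version violates the patterns above.
    print("client-side validation failed: \(error)")
}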
+ public let overlapTokens: Int + + public init(levelConfigurations: [HierarchicalChunkingLevelConfiguration], overlapTokens: Int) { + self.levelConfigurations = levelConfigurations + self.overlapTokens = overlapTokens + } + + public func validate(name: String) throws { + try self.validate(self.levelConfigurations, name: "levelConfigurations", parent: name, max: 2) + try self.validate(self.levelConfigurations, name: "levelConfigurations", parent: name, min: 2) + } + + private enum CodingKeys: String, CodingKey { + case levelConfigurations = "levelConfigurations" + case overlapTokens = "overlapTokens" + } + } + + public struct HierarchicalChunkingLevelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The maximum number of tokens that a chunk can contain in this layer. + public let maxTokens: Int + + public init(maxTokens: Int) { + self.maxTokens = maxTokens + } + + private enum CodingKeys: String, CodingKey { + case maxTokens = "maxTokens" + } + } + public struct InferenceConfiguration: AWSEncodableShape & AWSDecodableShape { /// The maximum number of tokens to allow in the generated response. public let maximumLength: Int? @@ -2379,6 +4360,31 @@ extension BedrockAgent { } } + public struct InputFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + + public struct IntermediateStorage: AWSEncodableShape & AWSDecodableShape { + /// An S3 bucket path. + public let s3Location: S3Location + + public init(s3Location: S3Location) { + self.s3Location = s3Location + } + + public func validate(name: String) throws { + try self.s3Location.validate(name: "\(name).s3Location") + } + + private enum CodingKeys: String, CodingKey { + case s3Location = "s3Location" + } + } + + public struct IteratorFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + public struct KnowledgeBase: AWSDecodableShape { /// The time at which the knowledge base was created. @CustomCoding @@ -2455,6 +4461,31 @@ extension BedrockAgent { } } + public struct KnowledgeBaseFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The unique identifier of the knowledge base to query. + public let knowledgeBaseId: String + /// The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. + public let modelId: String? + + public init(knowledgeBaseId: String, modelId: String? 
= nil) { + self.knowledgeBaseId = knowledgeBaseId + self.modelId = modelId + } + + public func validate(name: String) throws { + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, max: 10) + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$") + } + + private enum CodingKeys: String, CodingKey { + case knowledgeBaseId = "knowledgeBaseId" + case modelId = "modelId" + } + } + public struct KnowledgeBaseSummary: AWSDecodableShape { /// The description of the knowledge base. public let description: String? @@ -2485,6 +4516,48 @@ extension BedrockAgent { } } + public struct LambdaFunctionFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Lambda function to invoke. + public let lambdaArn: String + + public init(lambdaArn: String) { + self.lambdaArn = lambdaArn + } + + public func validate(name: String) throws { + try self.validate(self.lambdaArn, name: "lambdaArn", parent: name, max: 2048) + try self.validate(self.lambdaArn, name: "lambdaArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$") + } + + private enum CodingKeys: String, CodingKey { + case lambdaArn = "lambdaArn" + } + } + + public struct LexFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke. + public let botAliasArn: String + /// The Region to invoke the Amazon Lex bot in. + public let localeId: String + + public init(botAliasArn: String, localeId: String) { + self.botAliasArn = botAliasArn + self.localeId = localeId + } + + public func validate(name: String) throws { + try self.validate(self.botAliasArn, name: "botAliasArn", parent: name, max: 78) + try self.validate(self.botAliasArn, name: "botAliasArn", parent: name, pattern: "^arn:aws(|-us-gov):lex:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:bot-alias/[0-9a-zA-Z]+/[0-9a-zA-Z]+$") + try self.validate(self.localeId, name: "localeId", parent: name, max: 10) + try self.validate(self.localeId, name: "localeId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case botAliasArn = "botAliasArn" + case localeId = "localeId" + } + } + public struct ListAgentActionGroupsRequest: AWSEncodableShape { /// The unique identifier of the agent. 
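For reviewers wanting a feel for the new flow node shapes, here is a small sketch constructing the Lambda and Lex node configurations defined above. The ARNs and locale are invented placeholders chosen to satisfy the validate() patterns; wiring these into a FlowNodeConfiguration happens through shapes defined elsewhere in this diff.

import SotoBedrockAgent

// Placeholder ARNs; validate() enforces the same patterns shown in the shapes above.
let lambdaNode = LambdaFunctionFlowNodeConfiguration(
    lambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:my-flow-handler"
)
let lexNode = LexFlowNodeConfiguration(
    botAliasArn: "arn:aws:lex:us-east-1:123456789012:bot-alias/BOTID12345/ALIASID123",
    localeId: "en_US"
)
do {
    try lambdaNode.validate(name: "lambdaFunction")
    try lexNode.validate(name: "lex")
} catch {
    print("node configuration rejected: \(error)")
}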
public let agentId: String @@ -2684,7 +4757,157 @@ extension BedrockAgent { } public func validate(name: String) throws { - try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListAgentVersionsResponse: AWSDecodableShape { + /// A list of objects, each of which contains information about a version of the agent. + public let agentVersionSummaries: [AgentVersionSummary] + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + + public init(agentVersionSummaries: [AgentVersionSummary], nextToken: String? = nil) { + self.agentVersionSummaries = agentVersionSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case agentVersionSummaries = "agentVersionSummaries" + case nextToken = "nextToken" + } + } + + public struct ListAgentsRequest: AWSEncodableShape { + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxResults: Int? + /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListAgentsResponse: AWSDecodableShape { + /// A list of objects, each of which contains information about an agent. + public let agentSummaries: [AgentSummary] + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + + public init(agentSummaries: [AgentSummary], nextToken: String? 
= nil) { + self.agentSummaries = agentSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case agentSummaries = "agentSummaries" + case nextToken = "nextToken" + } + } + + public struct ListDataSourcesRequest: AWSEncodableShape { + /// The unique identifier of the knowledge base for which to return a list of information. + public let knowledgeBaseId: String + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxResults: Int? + /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + + public init(knowledgeBaseId: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.knowledgeBaseId = knowledgeBaseId + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + } + + public func validate(name: String) throws { + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListDataSourcesResponse: AWSDecodableShape { + /// A list of objects, each of which contains information about a data source. + public let dataSourceSummaries: [DataSourceSummary] + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + + public init(dataSourceSummaries: [DataSourceSummary], nextToken: String? = nil) { + self.dataSourceSummaries = dataSourceSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case dataSourceSummaries = "dataSourceSummaries" + case nextToken = "nextToken" + } + } + + public struct ListFlowAliasesRequest: AWSEncodableShape { + /// The unique identifier of the flow for which aliases are being returned. + public let flowIdentifier: String + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxResults: Int? 
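The maxResults/nextToken pairing in these List* shapes follows the usual pagination contract, so a manual loop looks like the sketch below. It assumes an already-configured BedrockAgent service object and the existing listDataSources operation in this module; Soto typically also generates paginator helpers for such operations, which most callers would prefer.

import SotoBedrockAgent

// Sketch of a manual nextToken loop over ListDataSources; a generated paginator
// helper would normally replace this.
func allDataSources(_ bedrockAgent: BedrockAgent, knowledgeBaseId: String) async throws -> [DataSourceSummary] {
    var summaries: [DataSourceSummary] = []
    var nextToken: String?
    repeat {
        let page = try await bedrockAgent.listDataSources(
            ListDataSourcesRequest(knowledgeBaseId: knowledgeBaseId, maxResults: 100, nextToken: nextToken)
        )
        summaries.append(contentsOf: page.dataSourceSummaries)
        nextToken = page.nextToken
    } while nextToken != nil
    return summaries
}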
+ /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + + public init(flowIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.flowIdentifier = flowIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) @@ -2692,41 +4915,50 @@ extension BedrockAgent { try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") } - private enum CodingKeys: String, CodingKey { - case maxResults = "maxResults" - case nextToken = "nextToken" - } + private enum CodingKeys: CodingKey {} } - public struct ListAgentVersionsResponse: AWSDecodableShape { - /// A list of objects, each of which contains information about a version of the agent. - public let agentVersionSummaries: [AgentVersionSummary] + public struct ListFlowAliasesResponse: AWSDecodableShape { + /// A list, each member of which contains information about a flow alias. + public let flowAliasSummaries: [FlowAliasSummary] /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? - public init(agentVersionSummaries: [AgentVersionSummary], nextToken: String? = nil) { - self.agentVersionSummaries = agentVersionSummaries + public init(flowAliasSummaries: [FlowAliasSummary], nextToken: String? = nil) { + self.flowAliasSummaries = flowAliasSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case agentVersionSummaries = "agentVersionSummaries" + case flowAliasSummaries = "flowAliasSummaries" case nextToken = "nextToken" } } - public struct ListAgentsRequest: AWSEncodableShape { + public struct ListFlowVersionsRequest: AWSEncodableShape { + /// The unique identifier of the flow. + public let flowIdentifier: String /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. public let maxResults: Int? /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. public let nextToken: String? - public init(maxResults: Int? = nil, nextToken: String? = nil) { + public init(flowIdentifier: String, maxResults: Int? = nil, nextToken: String? 
= nil) { + self.flowIdentifier = flowIdentifier self.maxResults = maxResults self.nextToken = nextToken } + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) @@ -2734,53 +4966,45 @@ extension BedrockAgent { try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") } - private enum CodingKeys: String, CodingKey { - case maxResults = "maxResults" - case nextToken = "nextToken" - } + private enum CodingKeys: CodingKey {} } - public struct ListAgentsResponse: AWSDecodableShape { - /// A list of objects, each of which contains information about an agent. - public let agentSummaries: [AgentSummary] + public struct ListFlowVersionsResponse: AWSDecodableShape { + /// A list, each member of which contains information about a flow. + public let flowVersionSummaries: [FlowVersionSummary] /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? - public init(agentSummaries: [AgentSummary], nextToken: String? = nil) { - self.agentSummaries = agentSummaries + public init(flowVersionSummaries: [FlowVersionSummary], nextToken: String? = nil) { + self.flowVersionSummaries = flowVersionSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case agentSummaries = "agentSummaries" + case flowVersionSummaries = "flowVersionSummaries" case nextToken = "nextToken" } } - public struct ListDataSourcesRequest: AWSEncodableShape { - /// The unique identifier of the knowledge base for which to return a list of information. - public let knowledgeBaseId: String + public struct ListFlowsRequest: AWSEncodableShape { /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. public let maxResults: Int? /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. public let nextToken: String? - public init(knowledgeBaseId: String, maxResults: Int? = nil, nextToken: String? = nil) { - self.knowledgeBaseId = knowledgeBaseId + public init(maxResults: Int? = nil, nextToken: String? = nil) { self.maxResults = maxResults self.nextToken = nextToken } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") - try container.encodeIfPresent(self.maxResults, forKey: .maxResults) - try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") } public func validate(name: String) throws { - try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) @@ -2788,25 +5012,22 @@ extension BedrockAgent { try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") } - private enum CodingKeys: String, CodingKey { - case maxResults = "maxResults" - case nextToken = "nextToken" - } + private enum CodingKeys: CodingKey {} } - public struct ListDataSourcesResponse: AWSDecodableShape { - /// A list of objects, each of which contains information about a data source. - public let dataSourceSummaries: [DataSourceSummary] + public struct ListFlowsResponse: AWSDecodableShape { + /// A list, each member of which contains information about a flow. + public let flowSummaries: [FlowSummary] /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? - public init(dataSourceSummaries: [DataSourceSummary], nextToken: String? = nil) { - self.dataSourceSummaries = dataSourceSummaries + public init(flowSummaries: [FlowSummary], nextToken: String? = nil) { + self.flowSummaries = flowSummaries self.nextToken = nextToken } private enum CodingKeys: String, CodingKey { - case dataSourceSummaries = "dataSourceSummaries" + case flowSummaries = "flowSummaries" case nextToken = "nextToken" } } @@ -2927,6 +5148,57 @@ extension BedrockAgent { } } + public struct ListPromptsRequest: AWSEncodableShape { + /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxResults: Int? + /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + /// The unique identifier of the prompt. + public let promptIdentifier: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil, promptIdentifier: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.promptIdentifier = promptIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.promptIdentifier, key: "promptIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + try self.validate(self.promptIdentifier, name: "promptIdentifier", parent: name, pattern: "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListPromptsResponse: AWSDecodableShape { + /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + /// A list, each member of which contains information about a prompt using Prompt management. + public let promptSummaries: [PromptSummary] + + public init(nextToken: String? = nil, promptSummaries: [PromptSummary]) { + self.nextToken = nextToken + self.promptSummaries = promptSummaries + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case promptSummaries = "promptSummaries" + } + } + public struct ListTagsForResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource for which to list tags. public let resourceArn: String @@ -2944,7 +5216,7 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$)") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base|flow|prompt)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:flow/([A-Z0-9]{10})/alias/([A-Z0-9]{10})$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:prompt/([A-Z0-9]{10})?(?::/d+)?$)") } private enum CodingKeys: CodingKey {} @@ -2963,6 +5235,30 @@ extension BedrockAgent { } } + public struct MemoryConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The type of memory that is stored. + public let enabledMemoryTypes: [MemoryType] + /// The number of days the agent is configured to retain the conversational context. + public let storageDays: Int? + + public init(enabledMemoryTypes: [MemoryType], storageDays: Int? 
= nil) { + self.enabledMemoryTypes = enabledMemoryTypes + self.storageDays = storageDays + } + + public func validate(name: String) throws { + try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, max: 1) + try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, min: 1) + try self.validate(self.storageDays, name: "storageDays", parent: name, max: 30) + try self.validate(self.storageDays, name: "storageDays", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case enabledMemoryTypes = "enabledMemoryTypes" + case storageDays = "storageDays" + } + } + public struct MongoDbAtlasConfiguration: AWSEncodableShape & AWSDecodableShape { /// The collection name of the knowledge base in MongoDB Atlas. public let collectionName: String @@ -3105,6 +5401,10 @@ extension BedrockAgent { } } + public struct OutputFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + public struct ParameterDetail: AWSEncodableShape & AWSDecodableShape { /// A description of the parameter. Helps the foundation model determine how to elicit the parameters from the user. public let description: String? @@ -3131,6 +5431,104 @@ extension BedrockAgent { } } + public struct ParsingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Settings for a foundation model used to parse documents for a data source. + public let bedrockFoundationModelConfiguration: BedrockFoundationModelConfiguration? + /// The parsing strategy for the data source. + public let parsingStrategy: ParsingStrategy + + public init(bedrockFoundationModelConfiguration: BedrockFoundationModelConfiguration? = nil, parsingStrategy: ParsingStrategy) { + self.bedrockFoundationModelConfiguration = bedrockFoundationModelConfiguration + self.parsingStrategy = parsingStrategy + } + + public func validate(name: String) throws { + try self.bedrockFoundationModelConfiguration?.validate(name: "\(name).bedrockFoundationModelConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case bedrockFoundationModelConfiguration = "bedrockFoundationModelConfiguration" + case parsingStrategy = "parsingStrategy" + } + } + + public struct ParsingPrompt: AWSEncodableShape & AWSDecodableShape { + /// Instructions for interpreting the contents of a document. + public let parsingPromptText: String + + public init(parsingPromptText: String) { + self.parsingPromptText = parsingPromptText + } + + public func validate(name: String) throws { + try self.validate(self.parsingPromptText, name: "parsingPromptText", parent: name, max: 10000) + try self.validate(self.parsingPromptText, name: "parsingPromptText", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case parsingPromptText = "parsingPromptText" + } + } + + public struct PatternObjectFilter: AWSEncodableShape & AWSDecodableShape { + /// A list of one or more exclusion regular expression patterns to exclude certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn’t crawled. + public let exclusionFilters: [String]? + /// A list of one or more inclusion regular expression patterns to include certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn’t crawled. + public let inclusionFilters: [String]? 
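ParsingPrompt above carries the free-form instructions handed to the parsing foundation model. A minimal sketch of constructing and validating one, with invented instruction text for illustration:

import SotoBedrockAgent

// Hypothetical parsing instructions; validate() enforces the 1...10000 character bound shown above.
let parsingPrompt = ParsingPrompt(
    parsingPromptText: "Transcribe tables as Markdown and summarise any charts in plain text."
)
do {
    try parsingPrompt.validate(name: "parsingPrompt")
} catch {
    print("parsing prompt rejected: \(error)")
}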
+ /// The supported object type or content type of the data source. + public let objectType: String + + public init(exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, objectType: String) { + self.exclusionFilters = exclusionFilters + self.inclusionFilters = inclusionFilters + self.objectType = objectType + } + + public func validate(name: String) throws { + try self.exclusionFilters?.forEach { + try validate($0, name: "exclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "exclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, max: 25) + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, min: 1) + try self.inclusionFilters?.forEach { + try validate($0, name: "inclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "inclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, max: 25) + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, min: 1) + try self.validate(self.objectType, name: "objectType", parent: name, max: 50) + try self.validate(self.objectType, name: "objectType", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case exclusionFilters = "exclusionFilters" + case inclusionFilters = "inclusionFilters" + case objectType = "objectType" + } + } + + public struct PatternObjectFilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of specific filters applied to your data source content. You can filter out or include certain content. + public let filters: [PatternObjectFilter] + + public init(filters: [PatternObjectFilter]) { + self.filters = filters + } + + public func validate(name: String) throws { + try self.filters.forEach { + try $0.validate(name: "\(name).filters[]") + } + try self.validate(self.filters, name: "filters", parent: name, max: 25) + try self.validate(self.filters, name: "filters", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + } + } + public struct PineconeConfiguration: AWSEncodableShape & AWSDecodableShape { /// The endpoint URL for your index management page. public let connectionString: String @@ -3236,42 +5634,205 @@ extension BedrockAgent { } } - public struct PromptConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. For more information, see Configure the prompt templates. - public let basePromptTemplate: String? - /// Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models. - public let inferenceConfiguration: InferenceConfiguration? - /// Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function. - public let parserMode: CreationMode? - /// Specifies whether to override the default prompt template for this promptType. 
Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template. - public let promptCreationMode: CreationMode? - /// Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows. PRE_PROCESSING – ENABLED ORCHESTRATION – ENABLED KNOWLEDGE_BASE_RESPONSE_GENERATION – ENABLED POST_PROCESSING – DISABLED - public let promptState: PromptState? - /// The step in the agent sequence that this prompt configuration applies to. - public let promptType: PromptType? + public struct PrepareFlowRequest: AWSEncodableShape { + /// The unique identifier of the flow. + public let flowIdentifier: String + + public init(flowIdentifier: String) { + self.flowIdentifier = flowIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + } + + public func validate(name: String) throws { + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct PrepareFlowResponse: AWSDecodableShape { + /// The unique identifier of the flow. + public let id: String + /// The status of the flow. When you submit this request, the status will be NotPrepared. If preparation succeeds, the status becomes Prepared. If it fails, the status becomes FAILED. + public let status: FlowStatus + + public init(id: String, status: FlowStatus) { + self.id = id + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case status = "status" + } + } + + public struct PromptConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. For more information, see Configure the prompt templates. + public let basePromptTemplate: String? + /// Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models. + public let inferenceConfiguration: InferenceConfiguration? + /// Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function. + public let parserMode: CreationMode? + /// Specifies whether to override the default prompt template for this promptType. Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template. + public let promptCreationMode: CreationMode? + /// Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows. 
PRE_PROCESSING – ENABLED ORCHESTRATION – ENABLED KNOWLEDGE_BASE_RESPONSE_GENERATION – ENABLED POST_PROCESSING – DISABLED + public let promptState: PromptState? + /// The step in the agent sequence that this prompt configuration applies to. + public let promptType: PromptType? + + public init(basePromptTemplate: String? = nil, inferenceConfiguration: InferenceConfiguration? = nil, parserMode: CreationMode? = nil, promptCreationMode: CreationMode? = nil, promptState: PromptState? = nil, promptType: PromptType? = nil) { + self.basePromptTemplate = basePromptTemplate + self.inferenceConfiguration = inferenceConfiguration + self.parserMode = parserMode + self.promptCreationMode = promptCreationMode + self.promptState = promptState + self.promptType = promptType + } + + public func validate(name: String) throws { + try self.validate(self.basePromptTemplate, name: "basePromptTemplate", parent: name, max: 100000) + try self.validate(self.basePromptTemplate, name: "basePromptTemplate", parent: name, min: 1) + try self.inferenceConfiguration?.validate(name: "\(name).inferenceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case basePromptTemplate = "basePromptTemplate" + case inferenceConfiguration = "inferenceConfiguration" + case parserMode = "parserMode" + case promptCreationMode = "promptCreationMode" + case promptState = "promptState" + case promptType = "promptType" + } + } + + public struct PromptFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether the prompt is from Prompt management or defined inline. + public let sourceConfiguration: PromptFlowNodeSourceConfiguration + + public init(sourceConfiguration: PromptFlowNodeSourceConfiguration) { + self.sourceConfiguration = sourceConfiguration + } + + public func validate(name: String) throws { + try self.sourceConfiguration.validate(name: "\(name).sourceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case sourceConfiguration = "sourceConfiguration" + } + } + + public struct PromptFlowNodeInlineConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains inference configurations for the prompt. + public let inferenceConfiguration: PromptInferenceConfiguration? + /// The unique identifier of the model to run inference with. + public let modelId: String + /// Contains a prompt and variables in the prompt that can be replaced with values at runtime. + public let templateConfiguration: PromptTemplateConfiguration + /// The type of prompt template. + public let templateType: PromptTemplateType + + public init(inferenceConfiguration: PromptInferenceConfiguration? 
= nil, modelId: String, templateConfiguration: PromptTemplateConfiguration, templateType: PromptTemplateType) { + self.inferenceConfiguration = inferenceConfiguration + self.modelId = modelId + self.templateConfiguration = templateConfiguration + self.templateType = templateType + } + + public func validate(name: String) throws { + try self.inferenceConfiguration?.validate(name: "\(name).inferenceConfiguration") + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.templateConfiguration.validate(name: "\(name).templateConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case inferenceConfiguration = "inferenceConfiguration" + case modelId = "modelId" + case templateConfiguration = "templateConfiguration" + case templateType = "templateType" + } + } + + public struct PromptFlowNodeResourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the prompt from Prompt management. + public let promptArn: String + + public init(promptArn: String) { + self.promptArn = promptArn + } + + public func validate(name: String) throws { + try self.validate(self.promptArn, name: "promptArn", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?)$") + } + + private enum CodingKeys: String, CodingKey { + case promptArn = "promptArn" + } + } + + public struct PromptInputVariable: AWSEncodableShape & AWSDecodableShape { + /// The name of the variable. + public let name: String? + + public init(name: String? = nil) { + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + } + } + + public struct PromptModelInferenceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The maximum number of tokens to return in the response. + public let maxTokens: Int? + /// A list of strings that define sequences after which the model will stop generating. + public let stopSequences: [String]? + /// Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs. + public let temperature: Float? + /// The number of most-likely candidates that the model considers for the next token during generation. + public let topK: Int? + /// The percentage of most-likely candidates that the model considers for the next token. + public let topP: Float? - public init(basePromptTemplate: String? = nil, inferenceConfiguration: InferenceConfiguration? = nil, parserMode: CreationMode? = nil, promptCreationMode: CreationMode? = nil, promptState: PromptState? = nil, promptType: PromptType? = nil) { - self.basePromptTemplate = basePromptTemplate - self.inferenceConfiguration = inferenceConfiguration - self.parserMode = parserMode - self.promptCreationMode = promptCreationMode - self.promptState = promptState - self.promptType = promptType + public init(maxTokens: Int? 
= nil, stopSequences: [String]? = nil, temperature: Float? = nil, topK: Int? = nil, topP: Float? = nil) { + self.maxTokens = maxTokens + self.stopSequences = stopSequences + self.temperature = temperature + self.topK = topK + self.topP = topP } public func validate(name: String) throws { - try self.validate(self.basePromptTemplate, name: "basePromptTemplate", parent: name, max: 100000) - try self.validate(self.basePromptTemplate, name: "basePromptTemplate", parent: name, min: 1) - try self.inferenceConfiguration?.validate(name: "\(name).inferenceConfiguration") + try self.validate(self.maxTokens, name: "maxTokens", parent: name, max: 4096) + try self.validate(self.maxTokens, name: "maxTokens", parent: name, min: 0) + try self.validate(self.stopSequences, name: "stopSequences", parent: name, max: 4) + try self.validate(self.temperature, name: "temperature", parent: name, max: 1.0) + try self.validate(self.temperature, name: "temperature", parent: name, min: 0.0) + try self.validate(self.topK, name: "topK", parent: name, max: 500) + try self.validate(self.topK, name: "topK", parent: name, min: 0) + try self.validate(self.topP, name: "topP", parent: name, max: 1.0) + try self.validate(self.topP, name: "topP", parent: name, min: 0.0) } private enum CodingKeys: String, CodingKey { - case basePromptTemplate = "basePromptTemplate" - case inferenceConfiguration = "inferenceConfiguration" - case parserMode = "parserMode" - case promptCreationMode = "promptCreationMode" - case promptState = "promptState" - case promptType = "promptType" + case maxTokens = "maxTokens" + case stopSequences = "stopSequences" + case temperature = "temperature" + case topK = "topK" + case topP = "topP" } } @@ -3301,6 +5862,83 @@ extension BedrockAgent { } } + public struct PromptSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the prompt. + public let arn: String + /// The time at which the prompt was created. + @CustomCoding + public var createdAt: Date + /// The description of the prompt. + public let description: String? + /// The unique identifier of the prompt. + public let id: String + /// The name of the prompt. + public let name: String + /// The time at which the prompt was last updated. + @CustomCoding + public var updatedAt: Date + /// The version of the prompt that this summary applies to. + public let version: String + + public init(arn: String, createdAt: Date, description: String? = nil, id: String, name: String, updatedAt: Date, version: String) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.id = id + self.name = name + self.updatedAt = updatedAt + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + case version = "version" + } + } + + public struct PromptVariant: AWSEncodableShape & AWSDecodableShape { + /// Contains inference configurations for the prompt variant. + public let inferenceConfiguration: PromptInferenceConfiguration? + /// The unique identifier of the model with which to run inference on the prompt. + public let modelId: String? + /// The name of the prompt variant. + public let name: String + /// Contains configurations for the prompt template. + public let templateConfiguration: PromptTemplateConfiguration? + /// The type of prompt template to use. 
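The inference bounds enforced by PromptModelInferenceConfiguration.validate() above are easy to check in isolation. A small sketch with illustrative values only:

import SotoBedrockAgent

// Illustrative values chosen to sit inside the validate() bounds above
// (maxTokens 0...4096, temperature and topP 0.0...1.0, topK 0...500, at most 4 stop sequences).
let inference = PromptModelInferenceConfiguration(
    maxTokens: 1024,
    stopSequences: ["\n\nHuman:"],
    temperature: 0.2,
    topP: 0.9
)
do {
    try inference.validate(name: "inferenceConfiguration")
} catch {
    print("inference configuration rejected: \(error)")
}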
+ public let templateType: PromptTemplateType + + public init(inferenceConfiguration: PromptInferenceConfiguration? = nil, modelId: String? = nil, name: String, templateConfiguration: PromptTemplateConfiguration? = nil, templateType: PromptTemplateType) { + self.inferenceConfiguration = inferenceConfiguration + self.modelId = modelId + self.name = name + self.templateConfiguration = templateConfiguration + self.templateType = templateType + } + + public func validate(name: String) throws { + try self.inferenceConfiguration?.validate(name: "\(name).inferenceConfiguration") + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.templateConfiguration?.validate(name: "\(name).templateConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case inferenceConfiguration = "inferenceConfiguration" + case modelId = "modelId" + case name = "name" + case templateConfiguration = "templateConfiguration" + case templateType = "templateType" + } + } + public struct RdsConfiguration: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Amazon RDS database. public let credentialsSecretArn: String @@ -3418,106 +6056,350 @@ extension BedrockAgent { /// The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources. public let vectorField: String - public init(metadataField: String, textField: String, vectorField: String) { - self.metadataField = metadataField - self.textField = textField - self.vectorField = vectorField + public init(metadataField: String, textField: String, vectorField: String) { + self.metadataField = metadataField + self.textField = textField + self.vectorField = vectorField + } + + public func validate(name: String) throws { + try self.validate(self.metadataField, name: "metadataField", parent: name, max: 2048) + try self.validate(self.metadataField, name: "metadataField", parent: name, pattern: "^.*$") + try self.validate(self.textField, name: "textField", parent: name, max: 2048) + try self.validate(self.textField, name: "textField", parent: name, pattern: "^.*$") + try self.validate(self.vectorField, name: "vectorField", parent: name, max: 2048) + try self.validate(self.vectorField, name: "vectorField", parent: name, pattern: "^.*$") + } + + private enum CodingKeys: String, CodingKey { + case metadataField = "metadataField" + case textField = "textField" + case vectorField = "vectorField" + } + } + + public struct RetrievalFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the service to use for retrieving data to return as the output from the node. 
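// Editor's illustrative sketch (not part of the generated diff): a PromptVariant built from
// the prompt shapes added in this hunk (PromptInferenceConfiguration, PromptTemplateConfiguration,
// TextPromptTemplateConfiguration, PromptInputVariable). The model ID is a placeholder, and
// `.text` assumes the case Soto generates for PromptTemplateType's TEXT value.
let variant = BedrockAgent.PromptVariant(
    inferenceConfiguration: .init(text: .init(maxTokens: 512, temperature: 0.2, topP: 0.9)),
    modelId: "anthropic.claude-3-sonnet-20240229-v1:0",  // placeholder model identifier
    name: "draft-variant",
    templateConfiguration: .init(text: .init(
        inputVariables: [.init(name: "topic")],
        text: "Write a short summary about {{topic}}."
    )),
    templateType: .text
)
try variant.validate(name: "variants[]")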
+ public let serviceConfiguration: RetrievalFlowNodeServiceConfiguration + + public init(serviceConfiguration: RetrievalFlowNodeServiceConfiguration) { + self.serviceConfiguration = serviceConfiguration + } + + public func validate(name: String) throws { + try self.serviceConfiguration.validate(name: "\(name).serviceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case serviceConfiguration = "serviceConfiguration" + } + } + + public struct RetrievalFlowNodeS3Configuration: AWSEncodableShape & AWSDecodableShape { + /// The name of the Amazon S3 bucket from which to retrieve data. + public let bucketName: String + + public init(bucketName: String) { + self.bucketName = bucketName + } + + public func validate(name: String) throws { + try self.validate(self.bucketName, name: "bucketName", parent: name, max: 63) + try self.validate(self.bucketName, name: "bucketName", parent: name, min: 3) + try self.validate(self.bucketName, name: "bucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + } + } + + public struct S3DataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the S3 bucket that contains your data. + public let bucketArn: String + /// The account ID for the owner of the S3 bucket. + public let bucketOwnerAccountId: String? + /// A list of S3 prefixes to include certain files or content. For more information, see Organizing objects using prefixes. + public let inclusionPrefixes: [String]? + + public init(bucketArn: String, bucketOwnerAccountId: String? = nil, inclusionPrefixes: [String]? = nil) { + self.bucketArn = bucketArn + self.bucketOwnerAccountId = bucketOwnerAccountId + self.inclusionPrefixes = inclusionPrefixes + } + + public func validate(name: String) throws { + try self.validate(self.bucketArn, name: "bucketArn", parent: name, max: 2048) + try self.validate(self.bucketArn, name: "bucketArn", parent: name, min: 1) + try self.validate(self.bucketArn, name: "bucketArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):s3:::[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$") + try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, max: 12) + try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, min: 12) + try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, pattern: "^[0-9]{12}$") + try self.inclusionPrefixes?.forEach { + try validate($0, name: "inclusionPrefixes[]", parent: name, max: 300) + try validate($0, name: "inclusionPrefixes[]", parent: name, min: 1) + } + try self.validate(self.inclusionPrefixes, name: "inclusionPrefixes", parent: name, max: 1) + try self.validate(self.inclusionPrefixes, name: "inclusionPrefixes", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case bucketArn = "bucketArn" + case bucketOwnerAccountId = "bucketOwnerAccountId" + case inclusionPrefixes = "inclusionPrefixes" + } + } + + public struct S3Identifier: AWSEncodableShape & AWSDecodableShape { + /// The name of the S3 bucket. + public let s3BucketName: String? + /// The S3 object key for the S3 resource. + public let s3ObjectKey: String? + + public init(s3BucketName: String? = nil, s3ObjectKey: String? 
= nil) { + self.s3BucketName = s3BucketName + self.s3ObjectKey = s3ObjectKey + } + + public func validate(name: String) throws { + try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, max: 63) + try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, min: 3) + try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$") + try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, max: 1024) + try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, min: 1) + try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, pattern: "^[\\.\\-\\!\\*\\_\\'\\(\\)a-zA-Z0-9][\\.\\-\\!\\*\\_\\'\\(\\)\\/a-zA-Z0-9]*$") + } + + private enum CodingKeys: String, CodingKey { + case s3BucketName = "s3BucketName" + case s3ObjectKey = "s3ObjectKey" + } + } + + public struct S3Location: AWSEncodableShape & AWSDecodableShape { + /// The location's URI. For example, s3://my-bucket/chunk-processor/. + public let uri: String + + public init(uri: String) { + self.uri = uri + } + + public func validate(name: String) throws { + try self.validate(self.uri, name: "uri", parent: name, max: 2048) + try self.validate(self.uri, name: "uri", parent: name, min: 1) + try self.validate(self.uri, name: "uri", parent: name, pattern: "^s3://.{1,128}$") + } + + private enum CodingKeys: String, CodingKey { + case uri = "uri" + } + } + + public struct SalesforceCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of filtering the Salesforce content. For example, configuring regular expression patterns to include or exclude certain content. + public let filterConfiguration: CrawlFilterConfiguration? + + public init(filterConfiguration: CrawlFilterConfiguration? = nil) { + self.filterConfiguration = filterConfiguration + } + + public func validate(name: String) throws { + try self.filterConfiguration?.validate(name: "\(name).filterConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case filterConfiguration = "filterConfiguration" + } + } + + public struct SalesforceDataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of the Salesforce content. For example, configuring specific types of Salesforce content. + public let crawlerConfiguration: SalesforceCrawlerConfiguration? + /// The endpoint information to connect to your Salesforce data source. + public let sourceConfiguration: SalesforceSourceConfiguration + + public init(crawlerConfiguration: SalesforceCrawlerConfiguration? = nil, sourceConfiguration: SalesforceSourceConfiguration) { + self.crawlerConfiguration = crawlerConfiguration + self.sourceConfiguration = sourceConfiguration + } + + public func validate(name: String) throws { + try self.crawlerConfiguration?.validate(name: "\(name).crawlerConfiguration") + try self.sourceConfiguration.validate(name: "\(name).sourceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case crawlerConfiguration = "crawlerConfiguration" + case sourceConfiguration = "sourceConfiguration" + } + } + + public struct SalesforceSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The supported authentication type to authenticate and connect to your Salesforce instance. + public let authType: SalesforceAuthType + /// The Amazon Resource Name of an Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. 
For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see Salesforce connection configuration. + public let credentialsSecretArn: String + /// The Salesforce host URL or instance URL. + public let hostUrl: String + + public init(authType: SalesforceAuthType, credentialsSecretArn: String, hostUrl: String) { + self.authType = authType + self.credentialsSecretArn = credentialsSecretArn + self.hostUrl = hostUrl + } + + public func validate(name: String) throws { + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$") + try self.validate(self.hostUrl, name: "hostUrl", parent: name, pattern: "^https://[A-Za-z0-9][^\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case authType = "authType" + case credentialsSecretArn = "credentialsSecretArn" + case hostUrl = "hostUrl" + } + } + + public struct SeedUrl: AWSEncodableShape & AWSDecodableShape { + /// A seed or starting point URL. + public let url: String? + + public init(url: String? = nil) { + self.url = url + } + + public func validate(name: String) throws { + try self.validate(self.url, name: "url", parent: name, pattern: "^https?://[A-Za-z0-9][^\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + + public struct SemanticChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The dissimilarity threshold for splitting chunks. + public let breakpointPercentileThreshold: Int + /// The buffer size. + public let bufferSize: Int + /// The maximum number of tokens that a chunk can contain. + public let maxTokens: Int + + public init(breakpointPercentileThreshold: Int, bufferSize: Int, maxTokens: Int) { + self.breakpointPercentileThreshold = breakpointPercentileThreshold + self.bufferSize = bufferSize + self.maxTokens = maxTokens + } + + private enum CodingKeys: String, CodingKey { + case breakpointPercentileThreshold = "breakpointPercentileThreshold" + case bufferSize = "bufferSize" + case maxTokens = "maxTokens" + } + } + + public struct ServerSideEncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the KMS key used to encrypt the resource. + public let kmsKeyArn: String? + + public init(kmsKeyArn: String? 
= nil) { + self.kmsKeyArn = kmsKeyArn } public func validate(name: String) throws { - try self.validate(self.metadataField, name: "metadataField", parent: name, max: 2048) - try self.validate(self.metadataField, name: "metadataField", parent: name, pattern: "^.*$") - try self.validate(self.textField, name: "textField", parent: name, max: 2048) - try self.validate(self.textField, name: "textField", parent: name, pattern: "^.*$") - try self.validate(self.vectorField, name: "vectorField", parent: name, max: 2048) - try self.validate(self.vectorField, name: "vectorField", parent: name, pattern: "^.*$") + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 1) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") } private enum CodingKeys: String, CodingKey { - case metadataField = "metadataField" - case textField = "textField" - case vectorField = "vectorField" + case kmsKeyArn = "kmsKeyArn" } } - public struct S3DataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the bucket that contains the data source. - public let bucketArn: String - /// The bucket account owner ID for the S3 bucket. - public let bucketOwnerAccountId: String? - /// A list of S3 prefixes that define the object containing the data sources. For more information, see Organizing objects using prefixes. - public let inclusionPrefixes: [String]? + public struct SharePointCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content. + public let filterConfiguration: CrawlFilterConfiguration? - public init(bucketArn: String, bucketOwnerAccountId: String? = nil, inclusionPrefixes: [String]? = nil) { - self.bucketArn = bucketArn - self.bucketOwnerAccountId = bucketOwnerAccountId - self.inclusionPrefixes = inclusionPrefixes + public init(filterConfiguration: CrawlFilterConfiguration? 
= nil) { + self.filterConfiguration = filterConfiguration } public func validate(name: String) throws { - try self.validate(self.bucketArn, name: "bucketArn", parent: name, max: 2048) - try self.validate(self.bucketArn, name: "bucketArn", parent: name, min: 1) - try self.validate(self.bucketArn, name: "bucketArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):s3:::[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$") - try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, max: 12) - try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, min: 12) - try self.validate(self.bucketOwnerAccountId, name: "bucketOwnerAccountId", parent: name, pattern: "^[0-9]{12}$") - try self.inclusionPrefixes?.forEach { - try validate($0, name: "inclusionPrefixes[]", parent: name, max: 300) - try validate($0, name: "inclusionPrefixes[]", parent: name, min: 1) - } - try self.validate(self.inclusionPrefixes, name: "inclusionPrefixes", parent: name, max: 1) - try self.validate(self.inclusionPrefixes, name: "inclusionPrefixes", parent: name, min: 1) + try self.filterConfiguration?.validate(name: "\(name).filterConfiguration") } private enum CodingKeys: String, CodingKey { - case bucketArn = "bucketArn" - case bucketOwnerAccountId = "bucketOwnerAccountId" - case inclusionPrefixes = "inclusionPrefixes" + case filterConfiguration = "filterConfiguration" } } - public struct S3Identifier: AWSEncodableShape & AWSDecodableShape { - /// The name of the S3 bucket. - public let s3BucketName: String? - /// The S3 object key containing the resource. - public let s3ObjectKey: String? + public struct SharePointDataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of the SharePoint content. For example, configuring specific types of SharePoint content. + public let crawlerConfiguration: SharePointCrawlerConfiguration? + /// The endpoint information to connect to your SharePoint data source. + public let sourceConfiguration: SharePointSourceConfiguration - public init(s3BucketName: String? = nil, s3ObjectKey: String? = nil) { - self.s3BucketName = s3BucketName - self.s3ObjectKey = s3ObjectKey + public init(crawlerConfiguration: SharePointCrawlerConfiguration? 
= nil, sourceConfiguration: SharePointSourceConfiguration) { + self.crawlerConfiguration = crawlerConfiguration + self.sourceConfiguration = sourceConfiguration } public func validate(name: String) throws { - try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, max: 63) - try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, min: 3) - try self.validate(self.s3BucketName, name: "s3BucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$") - try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, max: 1024) - try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, min: 1) - try self.validate(self.s3ObjectKey, name: "s3ObjectKey", parent: name, pattern: "^[\\.\\-\\!\\*\\_\\'\\(\\)a-zA-Z0-9][\\.\\-\\!\\*\\_\\'\\(\\)\\/a-zA-Z0-9]*$") + try self.crawlerConfiguration?.validate(name: "\(name).crawlerConfiguration") + try self.sourceConfiguration.validate(name: "\(name).sourceConfiguration") } private enum CodingKeys: String, CodingKey { - case s3BucketName = "s3BucketName" - case s3ObjectKey = "s3ObjectKey" + case crawlerConfiguration = "crawlerConfiguration" + case sourceConfiguration = "sourceConfiguration" } } - public struct ServerSideEncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the KMS key used to encrypt the resource. - public let kmsKeyArn: String? - - public init(kmsKeyArn: String? = nil) { - self.kmsKeyArn = kmsKeyArn + public struct SharePointSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The supported authentication type to authenticate and connect to your SharePoint site/sites. + public let authType: SharePointAuthType + /// The Amazon Resource Name of an Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see SharePoint connection configuration. + public let credentialsSecretArn: String + /// The domain of your SharePoint instance or site URL/URLs. + public let domain: String + /// The supported host type, whether online/cloud or server/on-premises. + public let hostType: SharePointHostType + /// A list of one or more SharePoint site URLs. + public let siteUrls: [String] + /// The identifier of your Microsoft 365 tenant. + public let tenantId: String? + + public init(authType: SharePointAuthType, credentialsSecretArn: String, domain: String, hostType: SharePointHostType, siteUrls: [String], tenantId: String? 
= nil) { + self.authType = authType + self.credentialsSecretArn = credentialsSecretArn + self.domain = domain + self.hostType = hostType + self.siteUrls = siteUrls + self.tenantId = tenantId } public func validate(name: String) throws { - try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) - try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 1) - try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$") + try self.validate(self.domain, name: "domain", parent: name, max: 50) + try self.validate(self.domain, name: "domain", parent: name, min: 1) + try self.siteUrls.forEach { + try validate($0, name: "siteUrls[]", parent: name, pattern: "^https://[A-Za-z0-9][^\\s]*$") + } + try self.validate(self.siteUrls, name: "siteUrls", parent: name, max: 100) + try self.validate(self.siteUrls, name: "siteUrls", parent: name, min: 1) + try self.validate(self.tenantId, name: "tenantId", parent: name, max: 36) + try self.validate(self.tenantId, name: "tenantId", parent: name, min: 36) + try self.validate(self.tenantId, name: "tenantId", parent: name, pattern: "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") } private enum CodingKeys: String, CodingKey { - case kmsKeyArn = "kmsKeyArn" + case authType = "authType" + case credentialsSecretArn = "credentialsSecretArn" + case domain = "domain" + case hostType = "hostType" + case siteUrls = "siteUrls" + case tenantId = "tenantId" } } @@ -3550,7 +6432,7 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$") try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) @@ -3617,6 +6499,42 @@ extension BedrockAgent { } } + public struct StorageFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the service to use for storing the input into the node. + public let serviceConfiguration: StorageFlowNodeServiceConfiguration + + public init(serviceConfiguration: StorageFlowNodeServiceConfiguration) { + self.serviceConfiguration = serviceConfiguration + } + + public func validate(name: String) throws { + try self.serviceConfiguration.validate(name: "\(name).serviceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case serviceConfiguration = "serviceConfiguration" + } + } + + public struct StorageFlowNodeS3Configuration: AWSEncodableShape & AWSDecodableShape { + /// The name of the Amazon S3 bucket in which to store the input into the node. 
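// Editor's illustrative sketch (not part of the generated diff): an S3 data source using the
// S3DataSourceConfiguration shape added earlier in this hunk. The bucket ARN, account ID and
// prefix are placeholders; note that the generated validation allows at most one inclusion prefix.
let s3DataSource = BedrockAgent.S3DataSourceConfiguration(
    bucketArn: "arn:aws:s3:::example-knowledge-base-bucket",
    bucketOwnerAccountId: "123456789012",
    inclusionPrefixes: ["documents/"]
)
try s3DataSource.validate(name: "s3Configuration")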
+ public let bucketName: String + + public init(bucketName: String) { + self.bucketName = bucketName + } + + public func validate(name: String) throws { + try self.validate(self.bucketName, name: "bucketName", parent: name, max: 63) + try self.validate(self.bucketName, name: "bucketName", parent: name, min: 3) + try self.validate(self.bucketName, name: "bucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource to tag. public let resourceArn: String @@ -3638,7 +6556,7 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$)") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base|flow|prompt)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:flow/([A-Z0-9]{10})/alias/([A-Z0-9]{10})$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:prompt/([A-Z0-9]{10})?(?::/d+)?$)") try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -3657,6 +6575,88 @@ extension BedrockAgent { public init() {} } + public struct TextPromptTemplateConfiguration: AWSEncodableShape & AWSDecodableShape { + /// An array of the variables in the prompt template. + public let inputVariables: [PromptInputVariable]? + /// The message for the prompt. + public let text: String + + public init(inputVariables: [PromptInputVariable]? = nil, text: String) { + self.inputVariables = inputVariables + self.text = text + } + + public func validate(name: String) throws { + try self.inputVariables?.forEach { + try $0.validate(name: "\(name).inputVariables[]") + } + try self.validate(self.inputVariables, name: "inputVariables", parent: name, max: 5) + try self.validate(self.text, name: "text", parent: name, max: 200000) + try self.validate(self.text, name: "text", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case inputVariables = "inputVariables" + case text = "text" + } + } + + public struct Transformation: AWSEncodableShape & AWSDecodableShape { + /// When the service applies the transformation. + public let stepToApply: StepType + /// A Lambda function that processes documents. + public let transformationFunction: TransformationFunction + + public init(stepToApply: StepType, transformationFunction: TransformationFunction) { + self.stepToApply = stepToApply + self.transformationFunction = transformationFunction + } + + public func validate(name: String) throws { + try self.transformationFunction.validate(name: "\(name).transformationFunction") + } + + private enum CodingKeys: String, CodingKey { + case stepToApply = "stepToApply" + case transformationFunction = "transformationFunction" + } + } + + public struct TransformationFunction: AWSEncodableShape & AWSDecodableShape { + /// The Lambda function. 
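// Editor's illustrative sketch (not part of the generated diff): a storage flow node that
// writes its input to an S3 bucket, combining the StorageFlowNodeConfiguration,
// StorageFlowNodeServiceConfiguration and StorageFlowNodeS3Configuration shapes added in this
// hunk. The bucket name is a placeholder.
let storageNode = BedrockAgent.StorageFlowNodeConfiguration(
    serviceConfiguration: .init(s3: .init(bucketName: "example-flow-output-bucket"))
)
try storageNode.validate(name: "storageConfiguration")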
+ public let transformationLambdaConfiguration: TransformationLambdaConfiguration + + public init(transformationLambdaConfiguration: TransformationLambdaConfiguration) { + self.transformationLambdaConfiguration = transformationLambdaConfiguration + } + + public func validate(name: String) throws { + try self.transformationLambdaConfiguration.validate(name: "\(name).transformationLambdaConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case transformationLambdaConfiguration = "transformationLambdaConfiguration" + } + } + + public struct TransformationLambdaConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The function's ARN identifier. + public let lambdaArn: String + + public init(lambdaArn: String) { + self.lambdaArn = lambdaArn + } + + public func validate(name: String) throws { + try self.validate(self.lambdaArn, name: "lambdaArn", parent: name, max: 2048) + try self.validate(self.lambdaArn, name: "lambdaArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$") + } + + private enum CodingKeys: String, CodingKey { + case lambdaArn = "lambdaArn" + } + } + public struct UntagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource from which to remove tags. public let resourceArn: String @@ -3678,7 +6678,7 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$)") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base|flow|prompt)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:flow/([A-Z0-9]{10})/alias/([A-Z0-9]{10})$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:prompt/([A-Z0-9]{10})?(?::/d+)?$)") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) @@ -3924,10 +6924,12 @@ extension BedrockAgent { public let idleSessionTTLInSeconds: Int? /// Specifies new instructions that tell the agent what it should do and how it should interact with users. public let instruction: String? + /// Specifies the new memory configuration for the agent. + public let memoryConfiguration: MemoryConfiguration? /// Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. public let promptOverrideConfiguration: PromptOverrideConfiguration? - public init(agentId: String, agentName: String, agentResourceRoleArn: String, customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil) { + public init(agentId: String, agentName: String, agentResourceRoleArn: String, customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? 
= nil, memoryConfiguration: MemoryConfiguration? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil) { self.agentId = agentId self.agentName = agentName self.agentResourceRoleArn = agentResourceRoleArn @@ -3937,6 +6939,7 @@ extension BedrockAgent { self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction + self.memoryConfiguration = memoryConfiguration self.promptOverrideConfiguration = promptOverrideConfiguration } @@ -3952,6 +6955,7 @@ extension BedrockAgent { try container.encodeIfPresent(self.guardrailConfiguration, forKey: .guardrailConfiguration) try container.encodeIfPresent(self.idleSessionTTLInSeconds, forKey: .idleSessionTTLInSeconds) try container.encodeIfPresent(self.instruction, forKey: .instruction) + try container.encodeIfPresent(self.memoryConfiguration, forKey: .memoryConfiguration) try container.encodeIfPresent(self.promptOverrideConfiguration, forKey: .promptOverrideConfiguration) } @@ -3973,6 +6977,7 @@ extension BedrockAgent { try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, min: 60) try self.validate(self.instruction, name: "instruction", parent: name, max: 4000) try self.validate(self.instruction, name: "instruction", parent: name, min: 40) + try self.memoryConfiguration?.validate(name: "\(name).memoryConfiguration") try self.promptOverrideConfiguration?.validate(name: "\(name).promptOverrideConfiguration") } @@ -3981,99 +6986,305 @@ extension BedrockAgent { case agentResourceRoleArn = "agentResourceRoleArn" case customerEncryptionKeyArn = "customerEncryptionKeyArn" case description = "description" - case foundationModel = "foundationModel" - case guardrailConfiguration = "guardrailConfiguration" - case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" - case instruction = "instruction" - case promptOverrideConfiguration = "promptOverrideConfiguration" + case foundationModel = "foundationModel" + case guardrailConfiguration = "guardrailConfiguration" + case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" + case instruction = "instruction" + case memoryConfiguration = "memoryConfiguration" + case promptOverrideConfiguration = "promptOverrideConfiguration" + } + } + + public struct UpdateAgentResponse: AWSDecodableShape { + /// Contains details about the agent that was updated. + public let agent: Agent + + public init(agent: Agent) { + self.agent = agent + } + + private enum CodingKeys: String, CodingKey { + case agent = "agent" + } + } + + public struct UpdateDataSourceRequest: AWSEncodableShape { + /// The data deletion policy for the data source that you want to update. + public let dataDeletionPolicy: DataDeletionPolicy? + /// The connection configuration for the data source that you want to update. + public let dataSourceConfiguration: DataSourceConfiguration + /// The unique identifier of the data source. + public let dataSourceId: String + /// Specifies a new description for the data source. + public let description: String? + /// The unique identifier of the knowledge base for the data source. + public let knowledgeBaseId: String + /// Specifies a new name for the data source. + public let name: String + /// Contains details about server-side encryption of the data source. + public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? + /// Contains details about how to ingest the documents in the data source. + public let vectorIngestionConfiguration: VectorIngestionConfiguration? 
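// Editor's illustrative sketch (not part of the generated diff): a custom document
// transformation backed by a Lambda function, using the Transformation, TransformationFunction
// and TransformationLambdaConfiguration shapes added in this hunk. The function ARN is a
// placeholder, and `.postChunking` assumes the case Soto generates for StepType's
// POST_CHUNKING value.
let transformation = BedrockAgent.Transformation(
    stepToApply: .postChunking,
    transformationFunction: .init(
        transformationLambdaConfiguration: .init(
            lambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:chunk-processor"
        )
    )
)
try transformation.validate(name: "transformations[]")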
+ + public init(dataDeletionPolicy: DataDeletionPolicy? = nil, dataSourceConfiguration: DataSourceConfiguration, dataSourceId: String, description: String? = nil, knowledgeBaseId: String, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { + self.dataDeletionPolicy = dataDeletionPolicy + self.dataSourceConfiguration = dataSourceConfiguration + self.dataSourceId = dataSourceId + self.description = description + self.knowledgeBaseId = knowledgeBaseId + self.name = name + self.serverSideEncryptionConfiguration = serverSideEncryptionConfiguration + self.vectorIngestionConfiguration = vectorIngestionConfiguration + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.dataDeletionPolicy, forKey: .dataDeletionPolicy) + try container.encode(self.dataSourceConfiguration, forKey: .dataSourceConfiguration) + request.encodePath(self.dataSourceId, key: "dataSourceId") + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.serverSideEncryptionConfiguration, forKey: .serverSideEncryptionConfiguration) + try container.encodeIfPresent(self.vectorIngestionConfiguration, forKey: .vectorIngestionConfiguration) + } + + public func validate(name: String) throws { + try self.dataSourceConfiguration.validate(name: "\(name).dataSourceConfiguration") + try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.serverSideEncryptionConfiguration?.validate(name: "\(name).serverSideEncryptionConfiguration") + try self.vectorIngestionConfiguration?.validate(name: "\(name).vectorIngestionConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case dataDeletionPolicy = "dataDeletionPolicy" + case dataSourceConfiguration = "dataSourceConfiguration" + case description = "description" + case name = "name" + case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" + case vectorIngestionConfiguration = "vectorIngestionConfiguration" + } + } + + public struct UpdateDataSourceResponse: AWSDecodableShape { + /// Contains details about the data source. + public let dataSource: DataSource + + public init(dataSource: DataSource) { + self.dataSource = dataSource + } + + private enum CodingKeys: String, CodingKey { + case dataSource = "dataSource" + } + } + + public struct UpdateFlowAliasRequest: AWSEncodableShape { + /// The unique identifier of the alias. + public let aliasIdentifier: String + /// A description for the flow alias. + public let description: String? + /// The unique identifier of the flow. + public let flowIdentifier: String + /// The name of the flow alias. + public let name: String + /// Contains information about the version to which to map the alias. 
+ public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + + public init(aliasIdentifier: String, description: String? = nil, flowIdentifier: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem]) { + self.aliasIdentifier = aliasIdentifier + self.description = description + self.flowIdentifier = flowIdentifier + self.name = name + self.routingConfiguration = routingConfiguration + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aliasIdentifier, key: "aliasIdentifier") + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + try container.encode(self.name, forKey: .name) + try container.encode(self.routingConfiguration, forKey: .routingConfiguration) + } + + public func validate(name: String) throws { + try self.validate(self.aliasIdentifier, name: "aliasIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(TSTALIASID|[0-9a-zA-Z]{10})$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.routingConfiguration.forEach { + try $0.validate(name: "\(name).routingConfiguration[]") + } + try self.validate(self.routingConfiguration, name: "routingConfiguration", parent: name, max: 1) + try self.validate(self.routingConfiguration, name: "routingConfiguration", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case name = "name" + case routingConfiguration = "routingConfiguration" } } - public struct UpdateAgentResponse: AWSDecodableShape { - /// Contains details about the agent that was updated. - public let agent: Agent + public struct UpdateFlowAliasResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. + @CustomCoding + public var createdAt: Date + /// The description of the flow. + public let description: String? + /// The unique identifier of the flow. + public let flowId: String + /// The unique identifier of the alias. + public let id: String + /// The name of the flow alias. + public let name: String + /// Contains information about the version that the alias is mapped to. + public let routingConfiguration: [FlowAliasRoutingConfigurationListItem] + /// The time at which the flow alias was last updated. + @CustomCoding + public var updatedAt: Date - public init(agent: Agent) { - self.agent = agent + public init(arn: String, createdAt: Date, description: String? 
= nil, flowId: String, id: String, name: String, routingConfiguration: [FlowAliasRoutingConfigurationListItem], updatedAt: Date) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.flowId = flowId + self.id = id + self.name = name + self.routingConfiguration = routingConfiguration + self.updatedAt = updatedAt } private enum CodingKeys: String, CodingKey { - case agent = "agent" + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case flowId = "flowId" + case id = "id" + case name = "name" + case routingConfiguration = "routingConfiguration" + case updatedAt = "updatedAt" } } - public struct UpdateDataSourceRequest: AWSEncodableShape { - /// The data deletion policy of the updated data source. - public let dataDeletionPolicy: DataDeletionPolicy? - /// Contains details about the storage configuration of the data source. - public let dataSourceConfiguration: DataSourceConfiguration - /// The unique identifier of the data source. - public let dataSourceId: String - /// Specifies a new description for the data source. + public struct UpdateFlowRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the flow. + public let customerEncryptionKeyArn: String? + /// A definition of the nodes and the connections between the nodes in the flow. + public let definition: FlowDefinition? + /// A description for the flow. public let description: String? - /// The unique identifier of the knowledge base to which the data source belongs. - public let knowledgeBaseId: String - /// Specifies a new name for the data source. + /// The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let flowIdentifier: String + /// A name for the flow. public let name: String - /// Contains details about server-side encryption of the data source. - public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? - /// Contains details about how to ingest the documents in the data source. - public let vectorIngestionConfiguration: VectorIngestionConfiguration? - public init(dataDeletionPolicy: DataDeletionPolicy? = nil, dataSourceConfiguration: DataSourceConfiguration, dataSourceId: String, description: String? = nil, knowledgeBaseId: String, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { - self.dataDeletionPolicy = dataDeletionPolicy - self.dataSourceConfiguration = dataSourceConfiguration - self.dataSourceId = dataSourceId + public init(customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? = nil, executionRoleArn: String, flowIdentifier: String, name: String) { + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition self.description = description - self.knowledgeBaseId = knowledgeBaseId + self.executionRoleArn = executionRoleArn + self.flowIdentifier = flowIdentifier self.name = name - self.serverSideEncryptionConfiguration = serverSideEncryptionConfiguration - self.vectorIngestionConfiguration = vectorIngestionConfiguration } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.dataDeletionPolicy, forKey: .dataDeletionPolicy) - try container.encode(self.dataSourceConfiguration, forKey: .dataSourceConfiguration) - request.encodePath(self.dataSourceId, key: "dataSourceId") + try container.encodeIfPresent(self.customerEncryptionKeyArn, forKey: .customerEncryptionKeyArn) + try container.encodeIfPresent(self.definition, forKey: .definition) try container.encodeIfPresent(self.description, forKey: .description) - request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + try container.encode(self.executionRoleArn, forKey: .executionRoleArn) + request.encodePath(self.flowIdentifier, key: "flowIdentifier") try container.encode(self.name, forKey: .name) - try container.encodeIfPresent(self.serverSideEncryptionConfiguration, forKey: .serverSideEncryptionConfiguration) - try container.encodeIfPresent(self.vectorIngestionConfiguration, forKey: .vectorIngestionConfiguration) } public func validate(name: String) throws { - try self.dataSourceConfiguration.validate(name: "\(name).dataSourceConfiguration") - try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.definition?.validate(name: "\(name).definition") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) - try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 2048) + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?.+$") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") - try self.serverSideEncryptionConfiguration?.validate(name: "\(name).serverSideEncryptionConfiguration") } private enum CodingKeys: String, CodingKey { - case dataDeletionPolicy = "dataDeletionPolicy" - case dataSourceConfiguration = "dataSourceConfiguration" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" case description = "description" + case executionRoleArn = "executionRoleArn" case name = "name" - case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" - case vectorIngestionConfiguration = "vectorIngestionConfiguration" } } - public struct UpdateDataSourceResponse: AWSDecodableShape { - /// Contains details about the data source. - public let dataSource: DataSource + public struct UpdateFlowResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the flow. + public let arn: String + /// The time at which the flow was created. 
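// Editor's illustrative sketch (not part of the generated diff): an UpdateFlowRequest built from
// the shape added above. The flow identifier, role ARN and names are placeholders; a real update
// would normally also pass a FlowDefinition describing the flow's nodes and connections, and the
// request would be handed to the matching updateFlow client method generated alongside these shapes.
let updateFlow = BedrockAgent.UpdateFlowRequest(
    description: "Summarises incoming documents",
    executionRoleArn: "arn:aws:iam::123456789012:role/service-role/ExampleBedrockFlowRole",
    flowIdentifier: "ABCDEFGHIJ",
    name: "document-summary-flow"
)
try updateFlow.validate(name: "UpdateFlowRequest")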
+ @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key that the flow was encrypted with. + public let customerEncryptionKeyArn: String? + /// A definition of the nodes and the connections between nodes in the flow. + public let definition: FlowDefinition? + /// The description of the flow. + public let description: String? + /// The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide. + public let executionRoleArn: String + /// The unique identifier of the flow. + public let id: String + /// The name of the flow. + public let name: String + /// The status of the flow. When you submit this request, the status will be NotPrepared. If updating fails, the status becomes Failed. + public let status: FlowStatus + /// The time at which the flow was last updated. + @CustomCoding + public var updatedAt: Date + /// The version of the flow. When you update a flow, the version updated is the DRAFT version. + public let version: String - public init(dataSource: DataSource) { - self.dataSource = dataSource + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, definition: FlowDefinition? = nil, description: String? = nil, executionRoleArn: String, id: String, name: String, status: FlowStatus, updatedAt: Date, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.definition = definition + self.description = description + self.executionRoleArn = executionRoleArn + self.id = id + self.name = name + self.status = status + self.updatedAt = updatedAt + self.version = version } private enum CodingKeys: String, CodingKey { - case dataSource = "dataSource" + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case definition = "definition" + case description = "description" + case executionRoleArn = "executionRoleArn" + case id = "id" + case name = "name" + case status = "status" + case updatedAt = "updatedAt" + case version = "version" } } @@ -4144,16 +7355,160 @@ extension BedrockAgent { } } + public struct UpdatePromptRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object. + public let defaultVariant: String? + /// A description for the prompt. + public let description: String? + /// A name for the prompt. + public let name: String + /// The unique identifier of the prompt. + public let promptIdentifier: String + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + + public init(customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? = nil, name: String, promptIdentifier: String, variants: [PromptVariant]? = nil) { + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.name = name + self.promptIdentifier = promptIdentifier + self.variants = variants + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.customerEncryptionKeyArn, forKey: .customerEncryptionKeyArn) + try container.encodeIfPresent(self.defaultVariant, forKey: .defaultVariant) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.name, forKey: .name) + request.encodePath(self.promptIdentifier, key: "promptIdentifier") + try container.encodeIfPresent(self.variants, forKey: .variants) + } + + public func validate(name: String) throws { + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) + try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.validate(self.defaultVariant, name: "defaultVariant", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.validate(self.description, name: "description", parent: name, max: 200) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") + try self.validate(self.promptIdentifier, name: "promptIdentifier", parent: name, pattern: "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$") + try self.variants?.forEach { + try $0.validate(name: "\(name).variants[]") + } + try self.validate(self.variants, name: "variants", parent: name, max: 3) + } + + private enum CodingKeys: String, CodingKey { + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case name = "name" + case variants = "variants" + } + } + + public struct UpdatePromptResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the prompt. + public let arn: String + /// The time at which the prompt was created. + @CustomCoding + public var createdAt: Date + /// The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt. + public let customerEncryptionKeyArn: String? + /// The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object. + public let defaultVariant: String? + /// The description of the prompt. + public let description: String? + /// The unique identifier of the prompt. + public let id: String + /// The name of the prompt. + public let name: String + /// The time at which the prompt was last updated. + @CustomCoding + public var updatedAt: Date + /// A list of objects, each containing details about a variant of the prompt. + public let variants: [PromptVariant]? + /// The version of the prompt. When you update a prompt, the version updated is the DRAFT version. + public let version: String + + public init(arn: String, createdAt: Date, customerEncryptionKeyArn: String? = nil, defaultVariant: String? = nil, description: String? = nil, id: String, name: String, updatedAt: Date, variants: [PromptVariant]? 
= nil, version: String) { + self.arn = arn + self.createdAt = createdAt + self.customerEncryptionKeyArn = customerEncryptionKeyArn + self.defaultVariant = defaultVariant + self.description = description + self.id = id + self.name = name + self.updatedAt = updatedAt + self.variants = variants + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case customerEncryptionKeyArn = "customerEncryptionKeyArn" + case defaultVariant = "defaultVariant" + case description = "description" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + case variants = "variants" + case version = "version" + } + } + + public struct UrlConfiguration: AWSEncodableShape & AWSDecodableShape { + /// One or more seed or starting point URLs. + public let seedUrls: [SeedUrl]? + + public init(seedUrls: [SeedUrl]? = nil) { + self.seedUrls = seedUrls + } + + public func validate(name: String) throws { + try self.seedUrls?.forEach { + try $0.validate(name: "\(name).seedUrls[]") + } + try self.validate(self.seedUrls, name: "seedUrls", parent: name, max: 100) + try self.validate(self.seedUrls, name: "seedUrls", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case seedUrls = "seedUrls" + } + } + public struct VectorIngestionConfiguration: AWSEncodableShape & AWSDecodableShape { /// Details about how to chunk the documents in the data source. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. public let chunkingConfiguration: ChunkingConfiguration? + /// A custom document transformer for parsed data source documents. + public let customTransformationConfiguration: CustomTransformationConfiguration? + /// A custom parser for data source documents. + public let parsingConfiguration: ParsingConfiguration? - public init(chunkingConfiguration: ChunkingConfiguration? = nil) { + public init(chunkingConfiguration: ChunkingConfiguration? = nil, customTransformationConfiguration: CustomTransformationConfiguration? = nil, parsingConfiguration: ParsingConfiguration? = nil) { self.chunkingConfiguration = chunkingConfiguration + self.customTransformationConfiguration = customTransformationConfiguration + self.parsingConfiguration = parsingConfiguration + } + + public func validate(name: String) throws { + try self.chunkingConfiguration?.validate(name: "\(name).chunkingConfiguration") + try self.customTransformationConfiguration?.validate(name: "\(name).customTransformationConfiguration") + try self.parsingConfiguration?.validate(name: "\(name).parsingConfiguration") } private enum CodingKeys: String, CodingKey { case chunkingConfiguration = "chunkingConfiguration" + case customTransformationConfiguration = "customTransformationConfiguration" + case parsingConfiguration = "parsingConfiguration" } } @@ -4181,6 +7536,98 @@ extension BedrockAgent { } } + public struct WebCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of crawl limits for the web URLs. + public let crawlerLimits: WebCrawlerLimits? + /// A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled. + public let exclusionFilters: [String]? + /// A list of one or more inclusion regular expression patterns to include certain URLs. 
If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled. + public let inclusionFilters: [String]? + /// The scope of what is crawled for your URLs. You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL "https://docs.aws.amazon.com/bedrock/latest/userguide/" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain "aws.amazon.com" can also include sub domain "docs.aws.amazon.com". + public let scope: WebScopeType? + + public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil) { + self.crawlerLimits = crawlerLimits + self.exclusionFilters = exclusionFilters + self.inclusionFilters = inclusionFilters + self.scope = scope + } + + public func validate(name: String) throws { + try self.exclusionFilters?.forEach { + try validate($0, name: "exclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "exclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, max: 25) + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, min: 1) + try self.inclusionFilters?.forEach { + try validate($0, name: "inclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "inclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, max: 25) + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case crawlerLimits = "crawlerLimits" + case exclusionFilters = "exclusionFilters" + case inclusionFilters = "inclusionFilters" + case scope = "scope" + } + } + + public struct WebCrawlerLimits: AWSEncodableShape & AWSDecodableShape { + /// The max rate at which pages are crawled, up to 300 per minute per host. + public let rateLimit: Int? + + public init(rateLimit: Int? = nil) { + self.rateLimit = rateLimit + } + + private enum CodingKeys: String, CodingKey { + case rateLimit = "rateLimit" + } + } + + public struct WebDataSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Web Crawler configuration details for the web data source. + public let crawlerConfiguration: WebCrawlerConfiguration? + /// The source configuration details for the web data source. + public let sourceConfiguration: WebSourceConfiguration + + public init(crawlerConfiguration: WebCrawlerConfiguration? = nil, sourceConfiguration: WebSourceConfiguration) { + self.crawlerConfiguration = crawlerConfiguration + self.sourceConfiguration = sourceConfiguration + } + + public func validate(name: String) throws { + try self.crawlerConfiguration?.validate(name: "\(name).crawlerConfiguration") + try self.sourceConfiguration.validate(name: "\(name).sourceConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case crawlerConfiguration = "crawlerConfiguration" + case sourceConfiguration = "sourceConfiguration" + } + } + + public struct WebSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of the URL/URLs. 
+ public let urlConfiguration: UrlConfiguration + + public init(urlConfiguration: UrlConfiguration) { + self.urlConfiguration = urlConfiguration + } + + public func validate(name: String) throws { + try self.urlConfiguration.validate(name: "\(name).urlConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case urlConfiguration = "urlConfiguration" + } + } + public struct FunctionSchema: AWSEncodableShape & AWSDecodableShape { /// A list of functions that each define an action in the action group. public let functions: [Function]? @@ -4199,6 +7646,74 @@ extension BedrockAgent { case functions = "functions" } } + + public struct PromptInferenceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains inference configurations for a text prompt. + public let text: PromptModelInferenceConfiguration? + + public init(text: PromptModelInferenceConfiguration? = nil) { + self.text = text + } + + public func validate(name: String) throws { + try self.text?.validate(name: "\(name).text") + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + + public struct PromptTemplateConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the text in a message for a prompt. + public let text: TextPromptTemplateConfiguration? + + public init(text: TextPromptTemplateConfiguration? = nil) { + self.text = text + } + + public func validate(name: String) throws { + try self.text?.validate(name: "\(name).text") + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + + public struct RetrievalFlowNodeServiceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node. + public let s3: RetrievalFlowNodeS3Configuration? + + public init(s3: RetrievalFlowNodeS3Configuration? = nil) { + self.s3 = s3 + } + + public func validate(name: String) throws { + try self.s3?.validate(name: "\(name).s3") + } + + private enum CodingKeys: String, CodingKey { + case s3 = "s3" + } + } + + public struct StorageFlowNodeServiceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Contains configurations for the Amazon S3 location in which to store the input into the node. + public let s3: StorageFlowNodeS3Configuration? + + public init(s3: StorageFlowNodeS3Configuration? = nil) { + self.s3 = s3 + } + + public func validate(name: String) throws { + try self.s3?.validate(name: "\(name).s3") + } + + private enum CodingKeys: String, CodingKey { + case s3 = "s3" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift index c49a563459..1af7eda121 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift @@ -74,7 +74,33 @@ public struct BedrockAgentRuntime: AWSService { // MARK: API Calls - /// The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. 
End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. + /// Deletes memory from the specified memory identifier. + @Sendable + public func deleteAgentMemory(_ input: DeleteAgentMemoryRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAgentMemoryResponse { + return try await self.client.execute( + operation: "DeleteAgentMemory", + path: "/agents/{agentId}/agentAliases/{agentAliasId}/memories", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets the sessions stored in the memory of the agent. + @Sendable + public func getAgentMemory(_ input: GetAgentMemoryRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAgentMemoryResponse { + return try await self.client.execute( + operation: "GetAgentMemory", + path: "/agents/{agentId}/agentAliases/{agentAliasId}/memories", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. @Sendable public func invokeAgent(_ input: InvokeAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeAgentResponse { return try await self.client.execute( @@ -87,6 +113,19 @@ public struct BedrockAgentRuntime: AWSService { ) } + /// Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. 
+ @Sendable + public func invokeFlow(_ input: InvokeFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeFlowResponse { + return try await self.client.execute( + operation: "InvokeFlow", + path: "/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Queries a knowledge base and retrieves information from it. @Sendable public func retrieve(_ input: RetrieveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RetrieveResponse { @@ -127,6 +166,25 @@ extension BedrockAgentRuntime { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension BedrockAgentRuntime { + /// Gets the sessions stored in the memory of the agent. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func getAgentMemoryPaginator( + _ input: GetAgentMemoryRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.getAgentMemory, + inputKey: \GetAgentMemoryRequest.nextToken, + outputKey: \GetAgentMemoryResponse.nextToken, + logger: logger + ) + } + /// Queries a knowledge base and retrieves information from it. /// Return PaginatorSequence for operation. /// @@ -147,6 +205,19 @@ extension BedrockAgentRuntime { } } +extension BedrockAgentRuntime.GetAgentMemoryRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> BedrockAgentRuntime.GetAgentMemoryRequest { + return .init( + agentAliasId: self.agentAliasId, + agentId: self.agentId, + maxItems: self.maxItems, + memoryId: self.memoryId, + memoryType: self.memoryType, + nextToken: token + ) + } +} + extension BedrockAgentRuntime.RetrieveRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> BedrockAgentRuntime.RetrieveRequest { return .init( diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index 522de95866..9d51cae626 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -32,12 +32,35 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum ExecutionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case lambda = "LAMBDA" + case returnControl = "RETURN_CONTROL" + public var description: String { return self.rawValue } + } + public enum ExternalSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case byteContent = "BYTE_CONTENT" case s3 = "S3" public var description: String { return self.rawValue } } + public enum FileSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case byteContent = "BYTE_CONTENT" + case s3 = "S3" + public var description: String { return self.rawValue } + } + + public enum FileUseCase: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case chat = "CHAT" + case codeInterpreter = "CODE_INTERPRETER" + public var description: String { return self.rawValue } + } + + public enum FlowCompletionReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case success = "SUCCESS" + public var description: String { return self.rawValue } + } + 
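Aside (not part of the diff): a minimal usage sketch for the GetAgentMemory paginator added above, assuming an already-configured AWSClient and the SotoBedrockAgentRuntime library target; the agent, alias, and memory identifiers are hypothetical placeholders.

import SotoBedrockAgentRuntime
import SotoCore

func printSessionSummaries(using client: AWSClient) async throws {
    let runtime = BedrockAgentRuntime(client: client, region: .useast1)
    // Hypothetical identifiers; real values must satisfy the validation rules in the shapes below.
    let request = BedrockAgentRuntime.GetAgentMemoryRequest(
        agentAliasId: "TSTALIASID",
        agentId: "AGENT12345",
        maxItems: 100,
        memoryId: "session-memory-1",
        memoryType: .sessionSummary
    )
    // The paginator follows nextToken across pages automatically.
    for try await page in runtime.getAgentMemoryPaginator(request) {
        for memory in page.memoryContents ?? [] {
            print(memory.sessionSummary?.summaryText ?? "<no summary>")
        }
    }
}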
public enum GuadrailAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case intervened = "INTERVENED" case none = "NONE" @@ -136,11 +159,28 @@ extension BedrockAgentRuntime { public enum InvocationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case actionGroup = "ACTION_GROUP" + case actionGroupCodeInterpreter = "ACTION_GROUP_CODE_INTERPRETER" case finish = "FINISH" case knowledgeBase = "KNOWLEDGE_BASE" public var description: String { return self.rawValue } } + public enum MemoryType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case sessionSummary = "SESSION_SUMMARY" + public var description: String { return self.rawValue } + } + + public enum NodeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case conditionNode = "ConditionNode" + case flowInputNode = "FlowInputNode" + case flowOutputNode = "FlowOutputNode" + case knowledgeBaseNode = "KnowledgeBaseNode" + case lambdaFunctionNode = "LambdaFunctionNode" + case lexNode = "LexNode" + case promptNode = "PromptNode" + public var description: String { return self.rawValue } + } + public enum PromptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case knowledgeBaseResponseGeneration = "KNOWLEDGE_BASE_RESPONSE_GENERATION" case orchestration = "ORCHESTRATION" @@ -149,6 +189,11 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum QueryTransformationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case queryDecomposition = "QUERY_DECOMPOSITION" + public var description: String { return self.rawValue } + } + public enum ResponseState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case failure = "FAILURE" case reprompt = "REPROMPT" @@ -156,7 +201,11 @@ extension BedrockAgentRuntime { } public enum RetrievalResultLocationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case confluence = "CONFLUENCE" case s3 = "S3" + case salesforce = "SALESFORCE" + case sharepoint = "SHAREPOINT" + case web = "WEB" public var description: String { return self.rawValue } } @@ -188,6 +237,91 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum FlowResponseStream: AWSDecodableShape, Sendable { + /// The request is denied because of missing access permissions. Check your permissions and retry your request. + case accessDeniedException(AccessDeniedException) + /// There was an issue with a dependency due to a server issue. Retry your request. + case badGatewayException(BadGatewayException) + /// There was a conflict performing an operation. Resolve the conflict and retry your request. + case conflictException(ConflictException) + /// There was an issue with a dependency. Check the resource configurations and retry the request. + case dependencyFailedException(DependencyFailedException) + /// Contains information about why the flow completed. + case flowCompletionEvent(FlowCompletionEvent) + /// Contains information about an output from flow invocation. + case flowOutputEvent(FlowOutputEvent) + /// An internal server error occurred. Retry your request. + case internalServerException(InternalServerException) + /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. 
+ case resourceNotFoundException(ResourceNotFoundException) + /// The number of requests exceeds the service quota. Resubmit your request later. + case serviceQuotaExceededException(ServiceQuotaExceededException) + /// The number of requests exceeds the limit. Resubmit your request later. + case throttlingException(ThrottlingException) + /// Input validation failed. Check your request parameters and retry the request. + case validationException(ValidationException) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .accessDeniedException: + let value = try container.decode(AccessDeniedException.self, forKey: .accessDeniedException) + self = .accessDeniedException(value) + case .badGatewayException: + let value = try container.decode(BadGatewayException.self, forKey: .badGatewayException) + self = .badGatewayException(value) + case .conflictException: + let value = try container.decode(ConflictException.self, forKey: .conflictException) + self = .conflictException(value) + case .dependencyFailedException: + let value = try container.decode(DependencyFailedException.self, forKey: .dependencyFailedException) + self = .dependencyFailedException(value) + case .flowCompletionEvent: + let value = try container.decode(FlowCompletionEvent.self, forKey: .flowCompletionEvent) + self = .flowCompletionEvent(value) + case .flowOutputEvent: + let value = try container.decode(FlowOutputEvent.self, forKey: .flowOutputEvent) + self = .flowOutputEvent(value) + case .internalServerException: + let value = try container.decode(InternalServerException.self, forKey: .internalServerException) + self = .internalServerException(value) + case .resourceNotFoundException: + let value = try container.decode(ResourceNotFoundException.self, forKey: .resourceNotFoundException) + self = .resourceNotFoundException(value) + case .serviceQuotaExceededException: + let value = try container.decode(ServiceQuotaExceededException.self, forKey: .serviceQuotaExceededException) + self = .serviceQuotaExceededException(value) + case .throttlingException: + let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) + self = .throttlingException(value) + case .validationException: + let value = try container.decode(ValidationException.self, forKey: .validationException) + self = .validationException(value) + } + } + + private enum CodingKeys: String, CodingKey { + case accessDeniedException = "accessDeniedException" + case badGatewayException = "badGatewayException" + case conflictException = "conflictException" + case dependencyFailedException = "dependencyFailedException" + case flowCompletionEvent = "flowCompletionEvent" + case flowOutputEvent = "flowOutputEvent" + case internalServerException = "internalServerException" + case resourceNotFoundException = "resourceNotFoundException" + case serviceQuotaExceededException = "serviceQuotaExceededException" + case throttlingException = "throttlingException" + case validationException = "validationException" + } + } + public enum InvocationInputMember: AWSDecodableShape, Sendable { /// Contains information about the API operation that the agent predicts should be called. 
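Aside (not part of the diff): a minimal sketch of calling the new InvokeFlow operation and draining the FlowResponseStream events decoded above; the flow and alias identifiers are hypothetical placeholders, and the input assumes a flow whose input node accepts a plain text document.

import SotoBedrockAgentRuntime
import SotoCore

func runFlow(using client: AWSClient) async throws {
    let runtime = BedrockAgentRuntime(client: client, region: .useast1)
    let request = BedrockAgentRuntime.InvokeFlowRequest(
        flowAliasIdentifier: "ALIAS12345", // hypothetical 10-character alias id
        flowIdentifier: "FLOW123456",      // hypothetical 10-character flow id
        inputs: [
            .init(
                content: .init(document: "What is the capital of France?"),
                nodeName: "FlowInputNode",
                nodeOutputName: "document"
            )
        ]
    )
    let response = try await runtime.invokeFlow(request)
    for try await event in response.responseStream {
        switch event {
        case .flowOutputEvent(let output):
            print("\(output.nodeName): \(output.content.document ?? "")")
        case .flowCompletionEvent(let completion):
            print("flow completed: \(completion.completionReason)")
        default:
            break // the remaining cases surface service-side exceptions
        }
    }
}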
case apiInvocationInput(ApiInvocationInput) @@ -357,6 +491,8 @@ extension BedrockAgentRuntime { case conflictException(ConflictException) /// There was an issue with a dependency. Check the resource configurations and retry the request. case dependencyFailedException(DependencyFailedException) + /// Contains intermediate response for code interpreter if any files have been generated. + case files(FilePart) /// An internal server error occurred. Retry your request. case internalServerException(InternalServerException) /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. @@ -397,6 +533,9 @@ extension BedrockAgentRuntime { case .dependencyFailedException: let value = try container.decode(DependencyFailedException.self, forKey: .dependencyFailedException) self = .dependencyFailedException(value) + case .files: + let value = try container.decode(FilePart.self, forKey: .files) + self = .files(value) case .internalServerException: let value = try container.decode(InternalServerException.self, forKey: .internalServerException) self = .internalServerException(value) @@ -427,6 +566,7 @@ extension BedrockAgentRuntime { case chunk = "chunk" case conflictException = "conflictException" case dependencyFailedException = "dependencyFailedException" + case files = "files" case internalServerException = "internalServerException" case resourceNotFoundException = "resourceNotFoundException" case returnControl = "returnControl" @@ -621,8 +761,12 @@ extension BedrockAgentRuntime { public let actionGroupName: String? /// The path to the API to call, based off the action group. public let apiPath: String? + /// How fulfillment of the action is handled. For more information, see Handling fulfillment of the action. + public let executionType: ExecutionType? /// The function in the action group to call. public let function: String? + /// The unique identifier of the invocation. Only returned if the executionType is RETURN_CONTROL. + public let invocationId: String? /// The parameters in the Lambda input event. public let parameters: [Parameter]? /// The parameters in the request body for the Lambda input event. @@ -630,10 +774,12 @@ extension BedrockAgentRuntime { /// The API method being used, based off the action group. public let verb: String? - public init(actionGroupName: String? = nil, apiPath: String? = nil, function: String? = nil, parameters: [Parameter]? = nil, requestBody: RequestBody? = nil, verb: String? = nil) { + public init(actionGroupName: String? = nil, apiPath: String? = nil, executionType: ExecutionType? = nil, function: String? = nil, invocationId: String? = nil, parameters: [Parameter]? = nil, requestBody: RequestBody? = nil, verb: String? = nil) { self.actionGroupName = actionGroupName self.apiPath = apiPath + self.executionType = executionType self.function = function + self.invocationId = invocationId self.parameters = parameters self.requestBody = requestBody self.verb = verb @@ -642,7 +788,9 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case actionGroupName = "actionGroupName" case apiPath = "apiPath" + case executionType = "executionType" case function = "function" + case invocationId = "invocationId" case parameters = "parameters" case requestBody = "requestBody" case verb = "verb" @@ -816,6 +964,28 @@ extension BedrockAgentRuntime { } } + public struct ByteContentFile: AWSEncodableShape { + /// The byte value of the file to attach, encoded as Base-64 string. 
The maximum size of all files that is attached is 10MB. You can attach a maximum of 5 files. + public let data: AWSBase64Data + /// The MIME type of data contained in the file used for chat. + public let mediaType: String + + public init(data: AWSBase64Data, mediaType: String) { + self.data = data + self.mediaType = mediaType + } + + public func validate(name: String) throws { + try self.validate(self.data, name: "data", parent: name, max: 10485760) + try self.validate(self.data, name: "data", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case data = "data" + case mediaType = "mediaType" + } + } + public struct Citation: AWSDecodableShape { /// Contains the generated response and metadata public let generatedResponsePart: GeneratedResponsePart? @@ -833,6 +1003,48 @@ extension BedrockAgentRuntime { } } + public struct CodeInterpreterInvocationInput: AWSDecodableShape { + /// The code for the code interpreter to use. + public let code: String? + /// Files that are uploaded for code interpreter to use. + public let files: [String]? + + public init(code: String? = nil, files: [String]? = nil) { + self.code = code + self.files = files + } + + private enum CodingKeys: String, CodingKey { + case code = "code" + case files = "files" + } + } + + public struct CodeInterpreterInvocationOutput: AWSDecodableShape { + /// Contains the error returned from code execution. + public let executionError: String? + /// Contains the successful output returned from code execution + public let executionOutput: String? + /// Indicates if the execution of the code timed out. + public let executionTimeout: Bool? + /// Contains output files, if generated by code execution. + public let files: [String]? + + public init(executionError: String? = nil, executionOutput: String? = nil, executionTimeout: Bool? = nil, files: [String]? = nil) { + self.executionError = executionError + self.executionOutput = executionOutput + self.executionTimeout = executionTimeout + self.files = files + } + + private enum CodingKeys: String, CodingKey { + case executionError = "executionError" + case executionOutput = "executionOutput" + case executionTimeout = "executionTimeout" + case files = "files" + } + } + public struct ConflictException: AWSDecodableShape { public let message: String? @@ -858,6 +1070,45 @@ extension BedrockAgentRuntime { } } + public struct DeleteAgentMemoryRequest: AWSEncodableShape { + /// The unique identifier of an alias of an agent. + public let agentAliasId: String + /// The unique identifier of the agent to which the alias belongs. + public let agentId: String + /// The unique identifier of the memory. + public let memoryId: String? + + public init(agentAliasId: String, agentId: String, memoryId: String? = nil) { + self.agentAliasId = agentAliasId + self.agentId = agentId + self.memoryId = memoryId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.agentAliasId, key: "agentAliasId") + request.encodePath(self.agentId, key: "agentId") + request.encodeQuery(self.memoryId, key: "memoryId") + } + + public func validate(name: String) throws { + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, max: 10) + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.validate(self.agentId, name: "agentId", parent: name, max: 10) + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.validate(self.memoryId, name: "memoryId", parent: name, max: 100) + try self.validate(self.memoryId, name: "memoryId", parent: name, min: 2) + try self.validate(self.memoryId, name: "memoryId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAgentMemoryResponse: AWSDecodableShape { + public init() {} + } + public struct DependencyFailedException: AWSDecodableShape { public let message: String? /// The name of the dependency that caused the issue, such as Amazon Bedrock, Lambda, or STS. @@ -984,6 +1235,45 @@ extension BedrockAgentRuntime { } } + public struct FilePart: AWSDecodableShape { + /// Files containing intermediate response for the user. + public let files: [OutputFile]? + + public init(files: [OutputFile]? = nil) { + self.files = files + } + + private enum CodingKeys: String, CodingKey { + case files = "files" + } + } + + public struct FileSource: AWSEncodableShape { + /// The data and the text of the attached files. + public let byteContent: ByteContentFile? + /// The s3 location of the files to attach. + public let s3Location: S3ObjectFile? + /// The source type of the files to attach. + public let sourceType: FileSourceType + + public init(byteContent: ByteContentFile? = nil, s3Location: S3ObjectFile? = nil, sourceType: FileSourceType) { + self.byteContent = byteContent + self.s3Location = s3Location + self.sourceType = sourceType + } + + public func validate(name: String) throws { + try self.byteContent?.validate(name: "\(name).byteContent") + try self.s3Location?.validate(name: "\(name).s3Location") + } + + private enum CodingKeys: String, CodingKey { + case byteContent = "byteContent" + case s3Location = "s3Location" + case sourceType = "sourceType" + } + } + public struct FilterAttribute: AWSEncodableShape { /// The name that the metadata attribute must match. public let key: String @@ -1019,6 +1309,66 @@ extension BedrockAgentRuntime { } } + public struct FlowCompletionEvent: AWSDecodableShape { + /// The reason that the flow completed. + public let completionReason: FlowCompletionReason + + public init(completionReason: FlowCompletionReason) { + self.completionReason = completionReason + } + + private enum CodingKeys: String, CodingKey { + case completionReason = "completionReason" + } + } + + public struct FlowInput: AWSEncodableShape { + /// Contains information about an input into the flow. + public let content: FlowInputContent + /// A name for the input of the flow input node. + public let nodeName: String + /// A name for the output of the flow input node. 
+ public let nodeOutputName: String + + public init(content: FlowInputContent, nodeName: String, nodeOutputName: String) { + self.content = content + self.nodeName = nodeName + self.nodeOutputName = nodeOutputName + } + + public func validate(name: String) throws { + try self.validate(self.nodeName, name: "nodeName", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$") + try self.validate(self.nodeOutputName, name: "nodeOutputName", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$") + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case nodeName = "nodeName" + case nodeOutputName = "nodeOutputName" + } + } + + public struct FlowOutputEvent: AWSDecodableShape { + /// The output of the node. + public let content: FlowOutputContent + /// The name of the node to which input was provided. + public let nodeName: String + /// The type of node to which input was provided. + public let nodeType: NodeType + + public init(content: FlowOutputContent, nodeName: String, nodeType: NodeType) { + self.content = content + self.nodeName = nodeName + self.nodeType = nodeType + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case nodeName = "nodeName" + case nodeType = "nodeType" + } + } + public struct FunctionInvocationInput: AWSDecodableShape { /// The action group that the function belongs to. public let actionGroup: String @@ -1133,6 +1483,75 @@ extension BedrockAgentRuntime { } } + public struct GetAgentMemoryRequest: AWSEncodableShape { + /// The unique identifier of an alias of an agent. + public let agentAliasId: String + /// The unique identifier of the agent to which the alias belongs. + public let agentId: String + /// The maximum number of items to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. + public let maxItems: Int? + /// The unique identifier of the memory. + public let memoryId: String + /// The type of memory. + public let memoryType: MemoryType + /// If the total number of results is greater than the maxItems value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. + public let nextToken: String? + + public init(agentAliasId: String, agentId: String, maxItems: Int? = nil, memoryId: String, memoryType: MemoryType, nextToken: String? = nil) { + self.agentAliasId = agentAliasId + self.agentId = agentId + self.maxItems = maxItems + self.memoryId = memoryId + self.memoryType = memoryType + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.agentAliasId, key: "agentAliasId") + request.encodePath(self.agentId, key: "agentId") + request.encodeQuery(self.maxItems, key: "maxItems") + request.encodeQuery(self.memoryId, key: "memoryId") + request.encodeQuery(self.memoryType, key: "memoryType") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, max: 10) + try self.validate(self.agentAliasId, name: "agentAliasId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.validate(self.agentId, name: "agentId", parent: name, max: 10) + try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.validate(self.maxItems, name: "maxItems", parent: name, max: 1000) + try self.validate(self.maxItems, name: "maxItems", parent: name, min: 1) + try self.validate(self.memoryId, name: "memoryId", parent: name, max: 100) + try self.validate(self.memoryId, name: "memoryId", parent: name, min: 2) + try self.validate(self.memoryId, name: "memoryId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S*$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAgentMemoryResponse: AWSDecodableShape { + /// Contains details of the sessions stored in the memory + public let memoryContents: [Memory]? + /// If the total number of results is greater than the maxItems value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. + public let nextToken: String? + + public init(memoryContents: [Memory]? = nil, nextToken: String? = nil) { + self.memoryContents = memoryContents + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case memoryContents = "memoryContents" + case nextToken = "nextToken" + } + } + public struct GuardrailAssessment: AWSDecodableShape { /// Content policy details of the Guardrail. public let contentPolicy: GuardrailContentPolicyAssessment? @@ -1432,6 +1851,31 @@ extension BedrockAgentRuntime { } } + public struct InputFile: AWSEncodableShape { + /// The name of the source file. + public let name: String + /// Specifies where the files are located. + public let source: FileSource + /// Specifies how the source files will be used by the code interpreter. + public let useCase: FileUseCase + + public init(name: String, source: FileSource, useCase: FileUseCase) { + self.name = name + self.source = source + self.useCase = useCase + } + + public func validate(name: String) throws { + try self.source.validate(name: "\(name).source") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case source = "source" + case useCase = "useCase" + } + } + public struct InternalServerException: AWSDecodableShape { public let message: String? @@ -1447,6 +1891,8 @@ extension BedrockAgentRuntime { public struct InvocationInput: AWSDecodableShape { /// Contains information about the action group to be invoked. public let actionGroupInvocationInput: ActionGroupInvocationInput? + /// Contains information about the code interpreter to be invoked. + public let codeInterpreterInvocationInput: CodeInterpreterInvocationInput? 
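Aside (not part of the diff): a minimal sketch that ties the new file shapes together, attaching an S3 object for the code interpreter through SessionState and watching for generated files in the InvokeAgent completion stream; the bucket, key, and identifiers are hypothetical placeholders.

import SotoBedrockAgentRuntime
import SotoCore

func askAgentAboutCSV(using client: AWSClient) async throws {
    let runtime = BedrockAgentRuntime(client: client, region: .useast1)
    // Attach an S3 object for the code interpreter to work on.
    let inputFile = BedrockAgentRuntime.InputFile(
        name: "sales.csv",
        source: .init(
            s3Location: .init(uri: "s3://my-example-bucket/sales.csv"),
            sourceType: .s3
        ),
        useCase: .codeInterpreter
    )
    let request = BedrockAgentRuntime.InvokeAgentRequest(
        agentAliasId: "TSTALIASID",
        agentId: "AGENT12345",
        inputText: "Summarize monthly revenue from the attached file.",
        memoryId: "session-memory-1",
        sessionId: "session-0001",
        sessionState: .init(files: [inputFile])
    )
    let response = try await runtime.invokeAgent(request)
    for try await event in response.completion {
        // FilePart events carry any files produced by the code interpreter.
        if case .files(let part) = event {
            for file in part.files ?? [] {
                print("generated file: \(file.name ?? "?") (\(file.type ?? "?"))")
            }
        }
    }
}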
/// Specifies whether the agent is invoking an action group or a knowledge base. public let invocationType: InvocationType? /// Contains details about the knowledge base to look up and the query to be made. @@ -1454,8 +1900,9 @@ extension BedrockAgentRuntime { /// The unique identifier of the trace. public let traceId: String? - public init(actionGroupInvocationInput: ActionGroupInvocationInput? = nil, invocationType: InvocationType? = nil, knowledgeBaseLookupInput: KnowledgeBaseLookupInput? = nil, traceId: String? = nil) { + public init(actionGroupInvocationInput: ActionGroupInvocationInput? = nil, codeInterpreterInvocationInput: CodeInterpreterInvocationInput? = nil, invocationType: InvocationType? = nil, knowledgeBaseLookupInput: KnowledgeBaseLookupInput? = nil, traceId: String? = nil) { self.actionGroupInvocationInput = actionGroupInvocationInput + self.codeInterpreterInvocationInput = codeInterpreterInvocationInput self.invocationType = invocationType self.knowledgeBaseLookupInput = knowledgeBaseLookupInput self.traceId = traceId @@ -1463,6 +1910,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case actionGroupInvocationInput = "actionGroupInvocationInput" + case codeInterpreterInvocationInput = "codeInterpreterInvocationInput" case invocationType = "invocationType" case knowledgeBaseLookupInput = "knowledgeBaseLookupInput" case traceId = "traceId" @@ -1480,17 +1928,20 @@ extension BedrockAgentRuntime { public let endSession: Bool? /// The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. public let inputText: String? + /// The unique identifier of the agent memory. + public let memoryId: String? /// The unique identifier of the session. Use the same value across requests to continue the same conversation. public let sessionId: String /// Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. public let sessionState: SessionState? - public init(agentAliasId: String, agentId: String, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, sessionId: String, sessionState: SessionState? = nil) { + public init(agentAliasId: String, agentId: String, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, memoryId: String? = nil, sessionId: String, sessionState: SessionState? 
= nil) { self.agentAliasId = agentAliasId self.agentId = agentId self.enableTrace = enableTrace self.endSession = endSession self.inputText = inputText + self.memoryId = memoryId self.sessionId = sessionId self.sessionState = sessionState } @@ -1503,6 +1954,7 @@ extension BedrockAgentRuntime { try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) try container.encodeIfPresent(self.endSession, forKey: .endSession) try container.encodeIfPresent(self.inputText, forKey: .inputText) + try container.encodeIfPresent(self.memoryId, forKey: .memoryId) request.encodePath(self.sessionId, key: "sessionId") try container.encodeIfPresent(self.sessionState, forKey: .sessionState) } @@ -1513,6 +1965,9 @@ extension BedrockAgentRuntime { try self.validate(self.agentId, name: "agentId", parent: name, max: 10) try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]+$") try self.validate(self.inputText, name: "inputText", parent: name, max: 25000000) + try self.validate(self.memoryId, name: "memoryId", parent: name, max: 100) + try self.validate(self.memoryId, name: "memoryId", parent: name, min: 2) + try self.validate(self.memoryId, name: "memoryId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") try self.validate(self.sessionId, name: "sessionId", parent: name, max: 100) try self.validate(self.sessionId, name: "sessionId", parent: name, min: 2) try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") @@ -1523,6 +1978,7 @@ extension BedrockAgentRuntime { case enableTrace = "enableTrace" case endSession = "endSession" case inputText = "inputText" + case memoryId = "memoryId" case sessionState = "sessionState" } } @@ -1533,12 +1989,15 @@ extension BedrockAgentRuntime { public let completion: AWSEventStream /// The MIME type of the input data in the request. The default value is application/json. public let contentType: String + /// The unique identifier of the agent memory. + public let memoryId: String? /// The unique identifier of the session with the agent. public let sessionId: String - public init(completion: AWSEventStream, contentType: String, sessionId: String) { + public init(completion: AWSEventStream, contentType: String, memoryId: String? = nil, sessionId: String) { self.completion = completion self.contentType = contentType + self.memoryId = memoryId self.sessionId = sessionId } @@ -1547,12 +2006,92 @@ extension BedrockAgentRuntime { let container = try decoder.singleValueContainer() self.completion = try container.decode(AWSEventStream.self) self.contentType = try response.decodeHeader(String.self, key: "x-amzn-bedrock-agent-content-type") + self.memoryId = try response.decodeHeaderIfPresent(String.self, key: "x-amz-bedrock-agent-memory-id") self.sessionId = try response.decodeHeader(String.self, key: "x-amz-bedrock-agent-session-id") } private enum CodingKeys: CodingKey {} } + public struct InvokeFlowRequest: AWSEncodableShape { + /// The unique identifier of the flow alias. + public let flowAliasIdentifier: String + /// The unique identifier of the flow. + public let flowIdentifier: String + /// A list of objects, each containing information about an input into the flow. + public let inputs: [FlowInput] + + public init(flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput]) { + self.flowAliasIdentifier = flowAliasIdentifier + self.flowIdentifier = flowIdentifier + self.inputs = inputs + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.flowAliasIdentifier, key: "flowAliasIdentifier") + request.encodePath(self.flowIdentifier, key: "flowIdentifier") + try container.encode(self.inputs, forKey: .inputs) + } + + public func validate(name: String) throws { + try self.validate(self.flowAliasIdentifier, name: "flowAliasIdentifier", parent: name, max: 2048) + try self.validate(self.flowAliasIdentifier, name: "flowAliasIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$") + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, max: 2048) + try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$") + try self.inputs.forEach { + try $0.validate(name: "\(name).inputs[]") + } + try self.validate(self.inputs, name: "inputs", parent: name, max: 1) + try self.validate(self.inputs, name: "inputs", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case inputs = "inputs" + } + } + + public struct InvokeFlowResponse: AWSDecodableShape { + public static let _options: AWSShapeOptions = [.rawPayload] + /// The output of the flow, returned as a stream. If there's an error, the error is returned. + public let responseStream: AWSEventStream + + public init(responseStream: AWSEventStream) { + self.responseStream = responseStream + } + + public init(from decoder: Decoder) throws { + let container = try decoder.singleValueContainer() + self.responseStream = try container.decode(AWSEventStream.self) + } + + private enum CodingKeys: CodingKey {} + } + + public struct KnowledgeBaseConfiguration: AWSEncodableShape { + /// The unique identifier for a knowledge base attached to the agent. + public let knowledgeBaseId: String + /// The configurations to apply to the knowledge base during query. For more information, see Query configurations. + public let retrievalConfiguration: KnowledgeBaseRetrievalConfiguration + + public init(knowledgeBaseId: String, retrievalConfiguration: KnowledgeBaseRetrievalConfiguration) { + self.knowledgeBaseId = knowledgeBaseId + self.retrievalConfiguration = retrievalConfiguration + } + + public func validate(name: String) throws { + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, max: 10) + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]+$") + try self.retrievalConfiguration.validate(name: "\(name).retrievalConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case knowledgeBaseId = "knowledgeBaseId" + case retrievalConfiguration = "retrievalConfiguration" + } + } + public struct KnowledgeBaseLookupInput: AWSDecodableShape { /// The unique identifier of the knowledge base to look up. public let knowledgeBaseId: String? @@ -1639,19 +2178,22 @@ extension BedrockAgentRuntime { } public struct KnowledgeBaseRetrieveAndGenerateConfiguration: AWSEncodableShape { - /// Contains configurations for response generation based on the knowwledge base query results. + /// Contains configurations for response generation based on the knowledge base query results. public let generationConfiguration: GenerationConfiguration? /// The unique identifier of the knowledge base that is queried and the foundation model used for generation. 
public let knowledgeBaseId: String /// The ARN of the foundation model used to generate a response. public let modelArn: String + /// Settings for how the model processes the prompt prior to retrieval and generation. + public let orchestrationConfiguration: OrchestrationConfiguration? /// Contains configurations for how to retrieve and return the knowledge base query. public let retrievalConfiguration: KnowledgeBaseRetrievalConfiguration? - public init(generationConfiguration: GenerationConfiguration? = nil, knowledgeBaseId: String, modelArn: String, retrievalConfiguration: KnowledgeBaseRetrievalConfiguration? = nil) { + public init(generationConfiguration: GenerationConfiguration? = nil, knowledgeBaseId: String, modelArn: String, orchestrationConfiguration: OrchestrationConfiguration? = nil, retrievalConfiguration: KnowledgeBaseRetrievalConfiguration? = nil) { self.generationConfiguration = generationConfiguration self.knowledgeBaseId = knowledgeBaseId self.modelArn = modelArn + self.orchestrationConfiguration = orchestrationConfiguration self.retrievalConfiguration = retrievalConfiguration } @@ -1669,6 +2211,7 @@ extension BedrockAgentRuntime { case generationConfiguration = "generationConfiguration" case knowledgeBaseId = "knowledgeBaseId" case modelArn = "modelArn" + case orchestrationConfiguration = "orchestrationConfiguration" case retrievalConfiguration = "retrievalConfiguration" } } @@ -1698,6 +2241,37 @@ extension BedrockAgentRuntime { } } + public struct MemorySessionSummary: AWSDecodableShape { + /// The unique identifier of the memory where the session summary is stored. + public let memoryId: String? + /// The time when the memory duration for the session is set to end. + @OptionalCustomCoding + public var sessionExpiryTime: Date? + /// The identifier for this session. + public let sessionId: String? + /// The start time for this session. + @OptionalCustomCoding + public var sessionStartTime: Date? + /// The summarized text for this session. + public let summaryText: String? + + public init(memoryId: String? = nil, sessionExpiryTime: Date? = nil, sessionId: String? = nil, sessionStartTime: Date? = nil, summaryText: String? = nil) { + self.memoryId = memoryId + self.sessionExpiryTime = sessionExpiryTime + self.sessionId = sessionId + self.sessionStartTime = sessionStartTime + self.summaryText = summaryText + } + + private enum CodingKeys: String, CodingKey { + case memoryId = "memoryId" + case sessionExpiryTime = "sessionExpiryTime" + case sessionId = "sessionId" + case sessionStartTime = "sessionStartTime" + case summaryText = "summaryText" + } + } + public struct ModelInvocationInput: AWSDecodableShape { /// Specifications about the inference parameters that were provided alongside the prompt. These are specified in the PromptOverrideConfiguration object that was set when the agent was created or updated. For more information, see Inference parameters for foundation models. public let inferenceConfiguration: InferenceConfiguration? @@ -1738,6 +2312,8 @@ extension BedrockAgentRuntime { public struct Observation: AWSDecodableShape { /// Contains the JSON-formatted string returned by the API invoked by the action group. public let actionGroupInvocationOutput: ActionGroupInvocationOutput? + /// Contains the JSON-formatted string returned by the API invoked by the code interpreter. + public let codeInterpreterInvocationOutput: CodeInterpreterInvocationOutput? /// Contains details about the response to the user. public let finalResponse: FinalResponse? 
/// Contains details about the results from looking up the knowledge base. @@ -1749,8 +2325,9 @@ extension BedrockAgentRuntime { /// Specifies what kind of information the agent returns in the observation. The following values are possible. ACTION_GROUP – The agent returns the result of an action group. KNOWLEDGE_BASE – The agent returns information from a knowledge base. FINISH – The agent returns a final response to the user with no follow-up. ASK_USER – The agent asks the user a question. REPROMPT – The agent prompts the user again for the same information. public let type: `Type`? - public init(actionGroupInvocationOutput: ActionGroupInvocationOutput? = nil, finalResponse: FinalResponse? = nil, knowledgeBaseLookupOutput: KnowledgeBaseLookupOutput? = nil, repromptResponse: RepromptResponse? = nil, traceId: String? = nil, type: `Type`? = nil) { + public init(actionGroupInvocationOutput: ActionGroupInvocationOutput? = nil, codeInterpreterInvocationOutput: CodeInterpreterInvocationOutput? = nil, finalResponse: FinalResponse? = nil, knowledgeBaseLookupOutput: KnowledgeBaseLookupOutput? = nil, repromptResponse: RepromptResponse? = nil, traceId: String? = nil, type: `Type`? = nil) { self.actionGroupInvocationOutput = actionGroupInvocationOutput + self.codeInterpreterInvocationOutput = codeInterpreterInvocationOutput self.finalResponse = finalResponse self.knowledgeBaseLookupOutput = knowledgeBaseLookupOutput self.repromptResponse = repromptResponse @@ -1760,6 +2337,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case actionGroupInvocationOutput = "actionGroupInvocationOutput" + case codeInterpreterInvocationOutput = "codeInterpreterInvocationOutput" case finalResponse = "finalResponse" case knowledgeBaseLookupOutput = "knowledgeBaseLookupOutput" case repromptResponse = "repromptResponse" @@ -1768,6 +2346,40 @@ extension BedrockAgentRuntime { } } + public struct OrchestrationConfiguration: AWSEncodableShape { + /// To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION. + public let queryTransformationConfiguration: QueryTransformationConfiguration + + public init(queryTransformationConfiguration: QueryTransformationConfiguration) { + self.queryTransformationConfiguration = queryTransformationConfiguration + } + + private enum CodingKeys: String, CodingKey { + case queryTransformationConfiguration = "queryTransformationConfiguration" + } + } + + public struct OutputFile: AWSDecodableShape { + /// The byte count of files that contains response from code interpreter. + public let bytes: AWSBase64Data? + /// The name of the file containing response from code interpreter. + public let name: String? + /// The type of file that contains response from the code interpreter. + public let type: String? + + public init(bytes: AWSBase64Data? = nil, name: String? = nil, type: String? = nil) { + self.bytes = bytes + self.name = name + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case bytes = "bytes" + case name = "name" + case type = "type" + } + } + public struct Parameter: AWSDecodableShape { /// The name of the parameter. public let name: String? @@ -1901,6 +2513,19 @@ extension BedrockAgentRuntime { } } + public struct QueryTransformationConfiguration: AWSEncodableShape { + /// The type of transformation to apply to the prompt. 
+ public let type: QueryTransformationType + + public init(type: QueryTransformationType) { + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + } + } + public struct Rationale: AWSDecodableShape { /// The reasoning or thought process of the agent, based on the input. public let text: String? @@ -1960,6 +2585,19 @@ extension BedrockAgentRuntime { } } + public struct RetrievalResultConfluenceLocation: AWSDecodableShape { + /// The Confluence host URL for the data source location. + public let url: String? + + public init(url: String? = nil) { + self.url = url + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + public struct RetrievalResultContent: AWSDecodableShape { /// The cited text from the data source. public let text: String @@ -1974,24 +2612,40 @@ extension BedrockAgentRuntime { } public struct RetrievalResultLocation: AWSDecodableShape { - /// Contains the S3 location of the data source. + /// The Confluence data source location. + public let confluenceLocation: RetrievalResultConfluenceLocation? + /// The S3 data source location. public let s3Location: RetrievalResultS3Location? - /// The type of the location of the data source. + /// The Salesforce data source location. + public let salesforceLocation: RetrievalResultSalesforceLocation? + /// The SharePoint data source location. + public let sharePointLocation: RetrievalResultSharePointLocation? + /// The type of data source location. public let type: RetrievalResultLocationType + /// The web URL/URLs data source location. + public let webLocation: RetrievalResultWebLocation? - public init(s3Location: RetrievalResultS3Location? = nil, type: RetrievalResultLocationType) { + public init(confluenceLocation: RetrievalResultConfluenceLocation? = nil, s3Location: RetrievalResultS3Location? = nil, salesforceLocation: RetrievalResultSalesforceLocation? = nil, sharePointLocation: RetrievalResultSharePointLocation? = nil, type: RetrievalResultLocationType, webLocation: RetrievalResultWebLocation? = nil) { + self.confluenceLocation = confluenceLocation self.s3Location = s3Location + self.salesforceLocation = salesforceLocation + self.sharePointLocation = sharePointLocation self.type = type + self.webLocation = webLocation } private enum CodingKeys: String, CodingKey { + case confluenceLocation = "confluenceLocation" case s3Location = "s3Location" + case salesforceLocation = "salesforceLocation" + case sharePointLocation = "sharePointLocation" case type = "type" + case webLocation = "webLocation" } } public struct RetrievalResultS3Location: AWSDecodableShape { - /// The S3 URI of the data source. + /// The S3 URI for the data source location. public let uri: String? public init(uri: String? = nil) { @@ -2003,6 +2657,45 @@ extension BedrockAgentRuntime { } } + public struct RetrievalResultSalesforceLocation: AWSDecodableShape { + /// The Salesforce host URL for the data source location. + public let url: String? + + public init(url: String? = nil) { + self.url = url + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + + public struct RetrievalResultSharePointLocation: AWSDecodableShape { + /// The SharePoint site URL for the data source location. + public let url: String? + + public init(url: String? = nil) { + self.url = url + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + + public struct RetrievalResultWebLocation: AWSDecodableShape { + /// The web URL/URLs for the data source location. 
+ public let url: String? + + public init(url: String? = nil) { + self.url = url + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + public struct RetrieveAndGenerateConfiguration: AWSEncodableShape { /// The configuration used with the external source wrapper object in the retrieveAndGenerate function. public let externalSourcesConfiguration: ExternalSourcesRetrieveAndGenerateConfiguration? @@ -2062,7 +2755,7 @@ extension BedrockAgentRuntime { public let retrieveAndGenerateConfiguration: RetrieveAndGenerateConfiguration? /// Contains details about the session with the knowledge base. public let sessionConfiguration: RetrieveAndGenerateSessionConfiguration? - /// The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base. + /// The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself. public let sessionId: String? public init(input: RetrieveAndGenerateInput, retrieveAndGenerateConfiguration: RetrieveAndGenerateConfiguration? = nil, sessionConfiguration: RetrieveAndGenerateSessionConfiguration? = nil, sessionId: String? = nil) { @@ -2095,7 +2788,7 @@ extension BedrockAgentRuntime { public let guardrailAction: GuadrailAction? /// Contains the response generated from querying the knowledge base. public let output: RetrieveAndGenerateOutput - /// The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base. + /// The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself. public let sessionId: String public init(citations: [Citation]? = nil, guardrailAction: GuadrailAction? = nil, output: RetrieveAndGenerateOutput, sessionId: String) { @@ -2248,6 +2941,25 @@ extension BedrockAgentRuntime { } } + public struct S3ObjectFile: AWSEncodableShape { + /// The uri of the s3 object. + public let uri: String + + public init(uri: String) { + self.uri = uri + } + + public func validate(name: String) throws { + try self.validate(self.uri, name: "uri", parent: name, max: 1024) + try self.validate(self.uri, name: "uri", parent: name, min: 1) + try self.validate(self.uri, name: "uri", parent: name, pattern: "^s3://[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]/.{1,1024}$") + } + + private enum CodingKeys: String, CodingKey { + case uri = "uri" + } + } + public struct ServiceQuotaExceededException: AWSDecodableShape { public let message: String? @@ -2261,8 +2973,12 @@ extension BedrockAgentRuntime { } public struct SessionState: AWSEncodableShape { + /// Contains information about the files used by code interpreter. + public let files: [InputFile]? /// The identifier of the invocation of an action. This value must match the invocationId returned in the InvokeAgent response for the action whose results are provided in the returnControlInvocationResults field. For more information, see Return control to the agent developer and Control session context. 
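import SotoBedrockAgentRuntime

// Illustrative sketch, not part of the generated diff: continuing a knowledge
// base conversation by echoing back the service-generated sessionId, as the
// updated documentation above describes. `agentRuntime` is an assumed,
// already-configured client, the prompt text is hypothetical, and the
// RetrieveAndGenerateInput(text:) initializer is assumed from the AWS model;
// a real call would also pass a retrieveAndGenerateConfiguration.
func continueSession(with agentRuntime: BedrockAgentRuntime) async throws {
    let first = try await agentRuntime.retrieveAndGenerate(
        .init(input: .init(text: "What does the onboarding guide cover?"))
    )
    // Reuse the generated sessionId so Amazon Bedrock keeps the conversation context.
    let followUp = try await agentRuntime.retrieveAndGenerate(
        .init(input: .init(text: "Summarize that in one sentence."), sessionId: first.sessionId)
    )
    print(followUp.sessionId)
}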
public let invocationId: String? + /// An array of configurations, each of which applies to a knowledge base attached to the agent. + public let knowledgeBaseConfigurations: [KnowledgeBaseConfiguration]? /// Contains attributes that persist across a prompt and the values of those attributes. These attributes replace the $prompt_session_attributes$ placeholder variable in the orchestration prompt template. For more information, see Prompt template placeholder variables. public let promptSessionAttributes: [String: String]? /// Contains information about the results from the action group invocation. For more information, see Return control to the agent developer and Control session context. If you include this field, the inputText field will be ignored. @@ -2270,20 +2986,31 @@ extension BedrockAgentRuntime { /// Contains attributes that persist across a session and the values of those attributes. public let sessionAttributes: [String: String]? - public init(invocationId: String? = nil, promptSessionAttributes: [String: String]? = nil, returnControlInvocationResults: [InvocationResultMember]? = nil, sessionAttributes: [String: String]? = nil) { + public init(files: [InputFile]? = nil, invocationId: String? = nil, knowledgeBaseConfigurations: [KnowledgeBaseConfiguration]? = nil, promptSessionAttributes: [String: String]? = nil, returnControlInvocationResults: [InvocationResultMember]? = nil, sessionAttributes: [String: String]? = nil) { + self.files = files self.invocationId = invocationId + self.knowledgeBaseConfigurations = knowledgeBaseConfigurations self.promptSessionAttributes = promptSessionAttributes self.returnControlInvocationResults = returnControlInvocationResults self.sessionAttributes = sessionAttributes } public func validate(name: String) throws { + try self.files?.forEach { + try $0.validate(name: "\(name).files[]") + } + try self.knowledgeBaseConfigurations?.forEach { + try $0.validate(name: "\(name).knowledgeBaseConfigurations[]") + } + try self.validate(self.knowledgeBaseConfigurations, name: "knowledgeBaseConfigurations", parent: name, min: 1) try self.validate(self.returnControlInvocationResults, name: "returnControlInvocationResults", parent: name, max: 5) try self.validate(self.returnControlInvocationResults, name: "returnControlInvocationResults", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { + case files = "files" case invocationId = "invocationId" + case knowledgeBaseConfigurations = "knowledgeBaseConfigurations" case promptSessionAttributes = "promptSessionAttributes" case returnControlInvocationResults = "returnControlInvocationResults" case sessionAttributes = "sessionAttributes" @@ -2411,6 +3138,45 @@ extension BedrockAgentRuntime { case message = "message" } } + + public struct FlowInputContent: AWSEncodableShape { + /// The input for the flow input node. + public let document: String? + + public init(document: String? = nil) { + self.document = document + } + + private enum CodingKeys: String, CodingKey { + case document = "document" + } + } + + public struct FlowOutputContent: AWSDecodableShape { + /// A name for the output of the flow. + public let document: String? + + public init(document: String? = nil) { + self.document = document + } + + private enum CodingKeys: String, CodingKey { + case document = "document" + } + } + + public struct Memory: AWSDecodableShape { + /// Contains summary of a session. + public let sessionSummary: MemorySessionSummary? + + public init(sessionSummary: MemorySessionSummary? 
= nil) { + self.sessionSummary = sessionSummary + } + + private enum CodingKeys: String, CodingKey { + case sessionSummary = "sessionSummary" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift index f3ce0c2f72..7a18162def 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift @@ -74,6 +74,19 @@ public struct BedrockRuntime: AWSService { // MARK: API Calls + /// The action to apply a guardrail. + @Sendable + public func applyGuardrail(_ input: ApplyGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ApplyGuardrailResponse { + return try await self.client.execute( + operation: "ApplyGuardrail", + path: "/guardrail/{guardrailIdentifier}/version/{guardrailVersion}/apply", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. @Sendable public func converse(_ input: ConverseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConverseResponse { diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift index e96dc19f99..d8d218afbb 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift @@ -45,6 +45,12 @@ extension BedrockRuntime { public var description: String { return self.rawValue } } + public enum GuardrailAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case guardrailIntervened = "GUARDRAIL_INTERVENED" + case none = "NONE" + public var description: String { return self.rawValue } + } + public enum GuardrailContentFilterConfidence: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case high = "HIGH" case low = "LOW" @@ -68,6 +74,38 @@ extension BedrockRuntime { public var description: String { return self.rawValue } } + public enum GuardrailContentQualifier: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case groundingSource = "grounding_source" + case guardContent = "guard_content" + case query = "query" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentSource: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case input = "INPUT" + case output = "OUTPUT" + public var description: String { return self.rawValue } + } + + public enum GuardrailContextualGroundingFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case grounding = "GROUNDING" + case relevance = "RELEVANCE" + public var 
description: String { return self.rawValue } + } + + public enum GuardrailContextualGroundingPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum GuardrailConverseContentQualifier: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case groundingSource = "grounding_source" + case guardContent = "guard_content" + case query = "query" + public var description: String { return self.rawValue } + } + public enum GuardrailManagedWordType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case profanity = "PROFANITY" public var description: String { return self.rawValue } @@ -422,7 +460,7 @@ extension BedrockRuntime { } public enum SystemContentBlock: AWSEncodableShape, Sendable { - /// A content block to assess with the guardrail. Use with the Converse API (Converse and ConverseStream). For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. + /// A content block to assess with the guardrail. Use with the Converse or ConverseStream API operations. For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. case guardContent(GuardrailConverseContentBlock) /// A system prompt for the model. case text(String) @@ -551,6 +589,69 @@ extension BedrockRuntime { public init() {} } + public struct ApplyGuardrailRequest: AWSEncodableShape { + /// The content details used in the request to apply the guardrail. + public let content: [GuardrailContentBlock] + /// The guardrail identifier used in the request to apply the guardrail. + public let guardrailIdentifier: String + /// The guardrail version used in the request to apply the guardrail. + public let guardrailVersion: String + /// The source of data used in the request to apply the guardrail. + public let source: GuardrailContentSource + + public init(content: [GuardrailContentBlock], guardrailIdentifier: String, guardrailVersion: String, source: GuardrailContentSource) { + self.content = content + self.guardrailIdentifier = guardrailIdentifier + self.guardrailVersion = guardrailVersion + self.source = source + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.content, forKey: .content) + request.encodePath(self.guardrailIdentifier, key: "guardrailIdentifier") + request.encodePath(self.guardrailVersion, key: "guardrailVersion") + try container.encode(self.source, forKey: .source) + } + + public func validate(name: String) throws { + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, max: 2048) + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, pattern: "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$") + try self.validate(self.guardrailVersion, name: "guardrailVersion", parent: name, pattern: "^(([1-9][0-9]{0,7})|(DRAFT))$") + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case source = "source" + } + } + + public struct ApplyGuardrailResponse: AWSDecodableShape { + /// The action taken in the response from the guardrail. + public let action: GuardrailAction + /// The assessment details in the response from the guardrail. 
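import SotoBedrockRuntime

// Illustrative sketch, not part of the generated diff: calling the new
// ApplyGuardrail operation with the request shape defined above. `runtime` is
// an assumed, already-configured client and the guardrail ID is hypothetical;
// GuardrailContentBlock and GuardrailTextBlock are the wrapper shapes added
// further down in this file.
func screenText(with runtime: BedrockRuntime) async throws {
    let block = BedrockRuntime.GuardrailContentBlock(
        text: BedrockRuntime.GuardrailTextBlock(text: "Draft reply to the customer")
    )
    let response = try await runtime.applyGuardrail(
        .init(content: [block], guardrailIdentifier: "abc123guardrail", guardrailVersion: "DRAFT", source: .input)
    )
    if response.action == .guardrailIntervened {
        print(response.outputs.first?.text ?? "guardrail intervened with no output text")
    }
}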
+ public let assessments: [GuardrailAssessment] + /// The output details in the response from the guardrail. + public let outputs: [GuardrailOutputContent] + /// The usage details in the response from the guardrail. + public let usage: GuardrailUsage + + public init(action: GuardrailAction, assessments: [GuardrailAssessment], outputs: [GuardrailOutputContent], usage: GuardrailUsage) { + self.action = action + self.assessments = assessments + self.outputs = outputs + self.usage = usage + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case assessments = "assessments" + case outputs = "outputs" + case usage = "usage" + } + } + public struct AutoToolChoice: AWSEncodableShape { public init() {} } @@ -866,7 +967,7 @@ extension BedrockRuntime { public struct DocumentBlock: AWSEncodableShape & AWSDecodableShape { /// The format of a document, or its extension. public let format: DocumentFormat - /// A name for the document. + /// A name for the document. The name can only contain the following characters: Alphanumeric characters Whitespace characters (no more than one in a row) Hyphens Parentheses Square brackets This field is vulnerable to prompt injections, because the model might inadvertently interpret it as instructions. Therefore, we recommend that you specify a neutral name. public let name: String /// Contains the content of the document. public let source: DocumentSource @@ -887,6 +988,8 @@ extension BedrockRuntime { public struct GuardrailAssessment: AWSDecodableShape { /// The content policy. public let contentPolicy: GuardrailContentPolicyAssessment? + /// The contextual grounding policy used for the guardrail assessment. + public let contextualGroundingPolicy: GuardrailContextualGroundingPolicyAssessment? /// The sensitive information policy. public let sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? /// The topic policy. @@ -894,8 +997,9 @@ extension BedrockRuntime { /// The word policy. public let wordPolicy: GuardrailWordPolicyAssessment? - public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { + public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, contextualGroundingPolicy: GuardrailContextualGroundingPolicyAssessment? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { self.contentPolicy = contentPolicy + self.contextualGroundingPolicy = contextualGroundingPolicy self.sensitiveInformationPolicy = sensitiveInformationPolicy self.topicPolicy = topicPolicy self.wordPolicy = wordPolicy @@ -903,6 +1007,7 @@ extension BedrockRuntime { private enum CodingKeys: String, CodingKey { case contentPolicy = "contentPolicy" + case contextualGroundingPolicy = "contextualGroundingPolicy" case sensitiveInformationPolicy = "sensitiveInformationPolicy" case topicPolicy = "topicPolicy" case wordPolicy = "wordPolicy" @@ -970,15 +1075,57 @@ extension BedrockRuntime { } } + public struct GuardrailContextualGroundingFilter: AWSDecodableShape { + /// The action performed by the guardrails contextual grounding filter. + public let action: GuardrailContextualGroundingPolicyAction + /// The score generated by contextual grounding filter. 
+ public let score: Double + /// The threshold used by contextual grounding filter to determine whether the content is grounded or not. + public let threshold: Double + /// The contextual grounding filter type. + public let type: GuardrailContextualGroundingFilterType + + public init(action: GuardrailContextualGroundingPolicyAction, score: Double, threshold: Double, type: GuardrailContextualGroundingFilterType) { + self.action = action + self.score = score + self.threshold = threshold + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case score = "score" + case threshold = "threshold" + case type = "type" + } + } + + public struct GuardrailContextualGroundingPolicyAssessment: AWSDecodableShape { + /// The filter details for the guardrails contextual grounding filter. + public let filters: [GuardrailContextualGroundingFilter]? + + public init(filters: [GuardrailContextualGroundingFilter]? = nil) { + self.filters = filters + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + } + } + public struct GuardrailConverseTextBlock: AWSEncodableShape & AWSDecodableShape { + /// The qualifier details for the guardrails contextual grounding filter. + public let qualifiers: [GuardrailConverseContentQualifier]? /// The text that you want to guard. public let text: String - public init(text: String) { + public init(qualifiers: [GuardrailConverseContentQualifier]? = nil, text: String) { + self.qualifiers = qualifiers self.text = text } private enum CodingKeys: String, CodingKey { + case qualifiers = "qualifiers" case text = "text" } } @@ -1021,6 +1168,19 @@ extension BedrockRuntime { } } + public struct GuardrailOutputContent: AWSDecodableShape { + /// The specific text for the output content produced by the guardrail. + public let text: String? + + public init(text: String? = nil) { + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + public struct GuardrailPiiEntityFilter: AWSDecodableShape { /// The PII entity filter action. public let action: GuardrailSensitiveInformationPolicyAction @@ -1115,6 +1275,23 @@ extension BedrockRuntime { } } + public struct GuardrailTextBlock: AWSEncodableShape { + /// The qualifiers describing the text block. + public let qualifiers: [GuardrailContentQualifier]? + /// The input text details to be evaluated by the guardrail. + public let text: String + + public init(qualifiers: [GuardrailContentQualifier]? = nil, text: String) { + self.qualifiers = qualifiers + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case qualifiers = "qualifiers" + case text = "text" + } + } + public struct GuardrailTopic: AWSDecodableShape { /// The action the guardrail should take when it intervenes on a topic. public let action: GuardrailTopicPolicyAction @@ -1170,6 +1347,39 @@ extension BedrockRuntime { } } + public struct GuardrailUsage: AWSDecodableShape { + /// The content policy units processed by the guardrail. + public let contentPolicyUnits: Int + /// The contextual grounding policy units processed by the guardrail. + public let contextualGroundingPolicyUnits: Int + /// The sensitive information policy free units processed by the guardrail. + public let sensitiveInformationPolicyFreeUnits: Int + /// The sensitive information policy units processed by the guardrail. + public let sensitiveInformationPolicyUnits: Int + /// The topic policy units processed by the guardrail. 
+ public let topicPolicyUnits: Int + /// The word policy units processed by the guardrail. + public let wordPolicyUnits: Int + + public init(contentPolicyUnits: Int, contextualGroundingPolicyUnits: Int, sensitiveInformationPolicyFreeUnits: Int, sensitiveInformationPolicyUnits: Int, topicPolicyUnits: Int, wordPolicyUnits: Int) { + self.contentPolicyUnits = contentPolicyUnits + self.contextualGroundingPolicyUnits = contextualGroundingPolicyUnits + self.sensitiveInformationPolicyFreeUnits = sensitiveInformationPolicyFreeUnits + self.sensitiveInformationPolicyUnits = sensitiveInformationPolicyUnits + self.topicPolicyUnits = topicPolicyUnits + self.wordPolicyUnits = wordPolicyUnits + } + + private enum CodingKeys: String, CodingKey { + case contentPolicyUnits = "contentPolicyUnits" + case contextualGroundingPolicyUnits = "contextualGroundingPolicyUnits" + case sensitiveInformationPolicyFreeUnits = "sensitiveInformationPolicyFreeUnits" + case sensitiveInformationPolicyUnits = "sensitiveInformationPolicyUnits" + case topicPolicyUnits = "topicPolicyUnits" + case wordPolicyUnits = "wordPolicyUnits" + } + } + public struct GuardrailWordPolicyAssessment: AWSDecodableShape { /// Custom words in the assessment. public let customWords: [GuardrailCustomWord] @@ -1394,7 +1604,7 @@ extension BedrockRuntime { } public struct Message: AWSEncodableShape & AWSDecodableShape { - /// The message content. + /// The message content. Note the following restrictions: You can include up to 20 images. Each image's size, height, and width must be no more than 3.75 MB, 8000 px, and 8000 px, respectively. You can include up to five documents. Each document's size must be no more than 4.5 MB. If you include a ContentBlock with a document field in the array, you must also include a ContentBlock with a text field. You can only include images and documents if the role is user. public let content: [ContentBlock] /// The role that the message plays in the message. public let role: ConversationRole @@ -1721,7 +1931,7 @@ extension BedrockRuntime { } public struct DocumentSource: AWSEncodableShape & AWSDecodableShape { - /// A base64-encoded string of a UTF-8 encoded file, that is the document to include in the message. + /// The raw bytes for the document. If you use an Amazon Web Services SDK, you don't need to encode the bytes in base64. public let bytes: AWSBase64Data? public init(bytes: AWSBase64Data? = nil) { @@ -1733,6 +1943,19 @@ extension BedrockRuntime { } } + public struct GuardrailContentBlock: AWSEncodableShape { + /// Text within content block to be evaluated by the guardrail. + public let text: GuardrailTextBlock? + + public init(text: GuardrailTextBlock? = nil) { + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + public struct GuardrailConverseContentBlock: AWSEncodableShape & AWSDecodableShape { /// The text to guard. public let text: GuardrailConverseTextBlock? @@ -1747,7 +1970,7 @@ extension BedrockRuntime { } public struct ImageSource: AWSEncodableShape & AWSDecodableShape { - /// The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes. + /// The raw image bytes for the image. If you use an AWS SDK, you don't need to encode the image bytes in base64. public let bytes: AWSBase64Data? public init(bytes: AWSBase64Data? 
= nil) { diff --git a/Sources/Soto/Services/Cloud9/Cloud9_api.swift b/Sources/Soto/Services/Cloud9/Cloud9_api.swift index 15bae11371..30c1ff6131 100644 --- a/Sources/Soto/Services/Cloud9/Cloud9_api.swift +++ b/Sources/Soto/Services/Cloud9/Cloud9_api.swift @@ -97,6 +97,20 @@ public struct Cloud9: AWSService { "us-east-2": "cloud9.us-east-2.api.aws", "us-west-1": "cloud9.us-west-1.api.aws", "us-west-2": "cloud9.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "cloud9-fips.ca-central-1.api.aws", + "us-east-1": "cloud9-fips.us-east-1.api.aws", + "us-east-2": "cloud9-fips.us-east-2.api.aws", + "us-west-1": "cloud9-fips.us-west-1.api.aws", + "us-west-2": "cloud9-fips.us-west-2.api.aws" + ]), + [.fips]: .init(endpoints: [ + "ca-central-1": "cloud9-fips.ca-central-1.amazonaws.com", + "us-east-1": "cloud9-fips.us-east-1.amazonaws.com", + "us-east-2": "cloud9-fips.us-east-2.amazonaws.com", + "us-west-1": "cloud9-fips.us-west-1.amazonaws.com", + "us-west-2": "cloud9-fips.us-west-2.amazonaws.com" ]) ]} diff --git a/Sources/Soto/Services/CloudControl/CloudControl_api.swift b/Sources/Soto/Services/CloudControl/CloudControl_api.swift index b1a1beba6d..56e05d9ea2 100644 --- a/Sources/Soto/Services/CloudControl/CloudControl_api.swift +++ b/Sources/Soto/Services/CloudControl/CloudControl_api.swift @@ -74,6 +74,51 @@ public struct CloudControl: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "cloudcontrolapi.af-south-1.api.aws", + "ap-east-1": "cloudcontrolapi.ap-east-1.api.aws", + "ap-northeast-1": "cloudcontrolapi.ap-northeast-1.api.aws", + "ap-northeast-2": "cloudcontrolapi.ap-northeast-2.api.aws", + "ap-northeast-3": "cloudcontrolapi.ap-northeast-3.api.aws", + "ap-south-1": "cloudcontrolapi.ap-south-1.api.aws", + "ap-south-2": "cloudcontrolapi.ap-south-2.api.aws", + "ap-southeast-1": "cloudcontrolapi.ap-southeast-1.api.aws", + "ap-southeast-2": "cloudcontrolapi.ap-southeast-2.api.aws", + "ap-southeast-3": "cloudcontrolapi.ap-southeast-3.api.aws", + "ap-southeast-4": "cloudcontrolapi.ap-southeast-4.api.aws", + "ca-central-1": "cloudcontrolapi.ca-central-1.api.aws", + "ca-west-1": "cloudcontrolapi.ca-west-1.api.aws", + "cn-north-1": "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "cloudcontrolapi.eu-central-1.api.aws", + "eu-central-2": "cloudcontrolapi.eu-central-2.api.aws", + "eu-north-1": "cloudcontrolapi.eu-north-1.api.aws", + "eu-south-1": "cloudcontrolapi.eu-south-1.api.aws", + "eu-south-2": "cloudcontrolapi.eu-south-2.api.aws", + "eu-west-1": "cloudcontrolapi.eu-west-1.api.aws", + "eu-west-2": "cloudcontrolapi.eu-west-2.api.aws", + "eu-west-3": "cloudcontrolapi.eu-west-3.api.aws", + "il-central-1": "cloudcontrolapi.il-central-1.api.aws", + "me-central-1": "cloudcontrolapi.me-central-1.api.aws", + "me-south-1": "cloudcontrolapi.me-south-1.api.aws", + "sa-east-1": "cloudcontrolapi.sa-east-1.api.aws", + "us-east-1": "cloudcontrolapi.us-east-1.api.aws", + "us-east-2": "cloudcontrolapi.us-east-2.api.aws", + "us-gov-east-1": "cloudcontrolapi.us-gov-east-1.api.aws", + "us-gov-west-1": "cloudcontrolapi.us-gov-west-1.api.aws", + "us-west-1": "cloudcontrolapi.us-west-1.api.aws", + "us-west-2": "cloudcontrolapi.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": 
"cloudcontrolapi-fips.ca-central-1.api.aws", + "ca-west-1": "cloudcontrolapi-fips.ca-west-1.api.aws", + "us-east-1": "cloudcontrolapi-fips.us-east-1.api.aws", + "us-east-2": "cloudcontrolapi-fips.us-east-2.api.aws", + "us-gov-east-1": "cloudcontrolapi-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "cloudcontrolapi-fips.us-gov-west-1.api.aws", + "us-west-1": "cloudcontrolapi-fips.us-west-1.api.aws", + "us-west-2": "cloudcontrolapi-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "cloudcontrolapi-fips.ca-central-1.amazonaws.com", "ca-west-1": "cloudcontrolapi-fips.ca-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index d028180a89..1841850e8f 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -2339,6 +2339,19 @@ public struct Connect: AWSService { ) } + /// Searches AgentStatuses in an Amazon Connect instance, with optional filtering. + @Sendable + public func searchAgentStatuses(_ input: SearchAgentStatusesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchAgentStatusesResponse { + return try await self.client.execute( + operation: "SearchAgentStatuses", + path: "/search-agent-statuses", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with the traffic distribution group. @Sendable public func searchAvailablePhoneNumbers(_ input: SearchAvailablePhoneNumbersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchAvailablePhoneNumbersResponse { @@ -2495,6 +2508,19 @@ public struct Connect: AWSService { ) } + /// Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering. The UserHierarchyGroup with "LevelId": "0" is the foundation for building levels on top of an instance. It is not user-definable, nor is it visible in the UI. + @Sendable + public func searchUserHierarchyGroups(_ input: SearchUserHierarchyGroupsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchUserHierarchyGroupsResponse { + return try await self.client.execute( + operation: "SearchUserHierarchyGroups", + path: "/search-user-hierarchy-groups", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches users in an Amazon Connect instance, with optional filtering. AfterContactWorkTimeLimit is returned in milliseconds. @Sendable public func searchUsers(_ input: SearchUsersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchUsersResponse { @@ -4290,6 +4316,25 @@ extension Connect { ) } + /// Searches AgentStatuses in an Amazon Connect instance, with optional filtering. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func searchAgentStatusesPaginator( + _ input: SearchAgentStatusesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchAgentStatuses, + inputKey: \SearchAgentStatusesRequest.nextToken, + outputKey: \SearchAgentStatusesResponse.nextToken, + logger: logger + ) + } + /// Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with the traffic distribution group. /// Return PaginatorSequence for operation. /// @@ -4518,6 +4563,25 @@ extension Connect { ) } + /// Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering. The UserHierarchyGroup with "LevelId": "0" is the foundation for building levels on top of an instance. It is not user-definable, nor is it visible in the UI. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func searchUserHierarchyGroupsPaginator( + _ input: SearchUserHierarchyGroupsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchUserHierarchyGroups, + inputKey: \SearchUserHierarchyGroupsRequest.nextToken, + outputKey: \SearchUserHierarchyGroupsResponse.nextToken, + logger: logger + ) + } + /// Searches users in an Amazon Connect instance, with optional filtering. AfterContactWorkTimeLimit is returned in milliseconds. /// Return PaginatorSequence for operation.
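import SotoConnect

// Illustrative sketch, not part of the generated diff: consuming the new
// SearchAgentStatuses paginator above as an AsyncSequence. `connect` is an
// assumed, already-configured client, the instance ID is hypothetical, and the
// `agentStatuses` response field name is taken from the AWS model for this API.
func listAgentStatusNames(with connect: Connect) async throws {
    let request = Connect.SearchAgentStatusesRequest(instanceId: "12345678-1234-1234-1234-123456789012")
    for try await page in connect.searchAgentStatusesPaginator(request) {
        for status in page.agentStatuses ?? [] {
            print(status.name ?? "<unnamed>")
        }
    }
}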
/// @@ -5076,6 +5140,18 @@ extension Connect.ListViewsRequest: AWSPaginateToken { } } +extension Connect.SearchAgentStatusesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Connect.SearchAgentStatusesRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token, + searchCriteria: self.searchCriteria, + searchFilter: self.searchFilter + ) + } +} + extension Connect.SearchAvailablePhoneNumbersRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Connect.SearchAvailablePhoneNumbersRequest { return .init( @@ -5222,6 +5298,18 @@ extension Connect.SearchSecurityProfilesRequest: AWSPaginateToken { } } +extension Connect.SearchUserHierarchyGroupsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Connect.SearchUserHierarchyGroupsRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token, + searchCriteria: self.searchCriteria, + searchFilter: self.searchFilter + ) + } +} + extension Connect.SearchUsersRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Connect.SearchUsersRequest { return .init( diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 506f6da711..944712bf9a 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -438,6 +438,17 @@ extension Connect { public var description: String { return self.rawValue } } + public enum NumberComparisonType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equal = "EQUAL" + case greater = "GREATER" + case greaterOrEqual = "GREATER_OR_EQUAL" + case lesser = "LESSER" + case lesserOrEqual = "LESSER_OR_EQUAL" + case notEqual = "NOT_EQUAL" + case range = "RANGE" + public var description: String { return self.rawValue } + } + public enum NumericQuestionPropertyAutomationLabel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case agentInteractionDuration = "AGENT_INTERACTION_DURATION" case contactDuration = "CONTACT_DURATION" @@ -895,6 +906,11 @@ extension Connect { public var description: String { return self.rawValue } } + public enum TargetListType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case proficiencies = "PROFICIENCIES" + public var description: String { return self.rawValue } + } + public enum TaskTemplateFieldType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case boolean = "BOOLEAN" case dateTime = "DATE_TIME" @@ -1633,6 +1649,40 @@ extension Connect { } } + public struct AgentStatusSearchCriteria: AWSEncodableShape { + /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name,
description, state, type, displayOrder,
 and resourceID. + public let andConditions: [AgentStatusSearchCriteria]? + /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [AgentStatusSearchCriteria]? + /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name,
description, state, type, displayOrder,
 and resourceID. + public let stringCondition: StringCondition? + + public init(andConditions: [AgentStatusSearchCriteria]? = nil, orConditions: [AgentStatusSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + self.andConditions = andConditions + self.orConditions = orConditions + self.stringCondition = stringCondition + } + + private enum CodingKeys: String, CodingKey { + case andConditions = "AndConditions" + case orConditions = "OrConditions" + case stringCondition = "StringCondition" + } + } + + public struct AgentStatusSearchFilter: AWSEncodableShape { + /// An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where: The top level list specifies conditions that need to be applied with OR operator. The inner list specifies conditions that need to be applied with AND operator. + public let attributeFilter: ControlPlaneAttributeFilter? + + public init(attributeFilter: ControlPlaneAttributeFilter? = nil) { + self.attributeFilter = attributeFilter + } + + private enum CodingKeys: String, CodingKey { + case attributeFilter = "AttributeFilter" + } + } + public struct AgentStatusSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) for the agent status. public let arn: String? @@ -3022,7 +3072,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -3109,6 +3159,19 @@ extension Connect { } } + public struct CommonAttributeAndCondition: AWSEncodableShape { + /// A leaf node condition which can be used to specify a tag condition. + public let tagConditions: [TagCondition]? + + public init(tagConditions: [TagCondition]? = nil) { + self.tagConditions = tagConditions + } + + private enum CodingKeys: String, CodingKey { + case tagConditions = "TagConditions" + } + } + public struct CompleteAttachedFileUploadRequest: AWSEncodableShape { /// The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. public let associatedResourceArn: String @@ -3145,6 +3208,23 @@ extension Connect { public init() {} } + public struct Condition: AWSEncodableShape { + /// A leaf node condition which can be used to specify a numeric condition. + public let numberCondition: NumberCondition? + /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name and
 value. + public let stringCondition: StringCondition? + + public init(numberCondition: NumberCondition? = nil, stringCondition: StringCondition? = nil) { + self.numberCondition = numberCondition + self.stringCondition = stringCondition + } + + private enum CodingKeys: String, CodingKey { + case numberCondition = "NumberCondition" + case stringCondition = "StringCondition" + } + } + public struct ConnectionData: AWSDecodableShape { /// The attendee information, including attendee ID and join token. public let attendee: Attendee? @@ -3684,6 +3764,26 @@ extension Connect { } } + public struct ControlPlaneAttributeFilter: AWSEncodableShape { + /// A list of conditions which would be applied together with an AND condition. + public let andCondition: CommonAttributeAndCondition? + /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [CommonAttributeAndCondition]? + public let tagCondition: TagCondition? + + public init(andCondition: CommonAttributeAndCondition? = nil, orConditions: [CommonAttributeAndCondition]? = nil, tagCondition: TagCondition? = nil) { + self.andCondition = andCondition + self.orConditions = orConditions + self.tagCondition = tagCondition + } + + private enum CodingKeys: String, CodingKey { + case andCondition = "AndCondition" + case orConditions = "OrConditions" + case tagCondition = "TagCondition" + } + } + public struct ControlPlaneTagFilter: AWSEncodableShape { /// A list of conditions which would be applied together with an AND condition. public let andConditions: [TagCondition]? @@ -3774,7 +3874,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -3880,7 +3980,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -3958,7 +4058,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4114,7 +4214,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: 
"^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4184,7 +4284,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4268,7 +4368,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4493,7 +4593,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4581,7 +4681,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4657,7 +4757,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4749,7 +4849,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -4946,7 +5046,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try 
validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5110,7 +5210,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5179,7 +5279,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5241,7 +5341,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5333,7 +5433,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5426,7 +5526,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -5560,7 +5660,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, 
max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -9518,7 +9618,7 @@ extension Connect { public let interval: IntervalDetails? /// The maximum number of results to return per page. public let maxResults: Int? - /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: This metric is not available in Amazon Connect admin website. CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts + /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. 
PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts public let metrics: [MetricV2] /// The token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. 
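Usage sketch (not part of the generated diff): one way the historical metrics documented above could be requested through this package, assuming the GetMetricDataV2Request, FilterV2, MetricV2, and ThresholdV2 shapes generated for Connect elsewhere in Connect_shapes.swift. The instance ARN, queue ID, metric names, and threshold value are illustrative placeholders.

import Foundation
import SotoConnect

// Request ABANDONMENT_RATE and a SERVICE_LEVEL metric (threshold in seconds,
// Comparison must be "LT" as documented above), grouped and filtered by Queue.
func printQueueMetrics(instanceArn: String, queueId: String) async throws {
    let awsClient = AWSClient()                          // default credential chain
    let connect = Connect(client: awsClient, region: .useast1)

    let metrics: [Connect.MetricV2] = [
        .init(name: "ABANDONMENT_RATE"),
        .init(name: "SERVICE_LEVEL", threshold: [.init(comparison: "LT", thresholdValue: 60)])
    ]

    let request = Connect.GetMetricDataV2Request(
        endTime: Date(),
        filters: [.init(filterKey: "QUEUE", filterValues: [queueId])],
        groupings: ["QUEUE"],
        metrics: metrics,
        resourceArn: instanceArn,
        startTime: Date().addingTimeInterval(-3 * 3600)  // last three hours
    )

    let response = try await connect.getMetricDataV2(request)
    print(response.metricResults ?? [])

    try await awsClient.shutdown()
}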
@@ -10320,7 +10420,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -10963,6 +11063,23 @@ extension Connect { } } + public struct ListCondition: AWSEncodableShape { + /// A list of Condition objects which would be applied together with an AND condition. + public let conditions: [Condition]? + /// The type of target list that will be used to filter the users. + public let targetListType: TargetListType? + + public init(conditions: [Condition]? = nil, targetListType: TargetListType? = nil) { + self.conditions = conditions + self.targetListType = targetListType + } + + private enum CodingKeys: String, CodingKey { + case conditions = "Conditions" + case targetListType = "TargetListType" + } + } + public struct ListContactEvaluationsRequest: AWSEncodableShape { /// The identifier of the contact in this instance of Amazon Connect. public let contactId: String @@ -13533,6 +13650,31 @@ extension Connect { } } + public struct NumberCondition: AWSEncodableShape { + /// The type of comparison to be made when evaluating the number condition. + public let comparisonType: NumberComparisonType? + /// The name of the field in the number condition. + public let fieldName: String? + /// The maxValue to be used while evaluating the number condition. + public let maxValue: Int? + /// The minValue to be used while evaluating the number condition. + public let minValue: Int? + + public init(comparisonType: NumberComparisonType? = nil, fieldName: String? = nil, maxValue: Int? = nil, minValue: Int? = nil) { + self.comparisonType = comparisonType + self.fieldName = fieldName + self.maxValue = maxValue + self.minValue = minValue + } + + private enum CodingKeys: String, CodingKey { + case comparisonType = "ComparisonType" + case fieldName = "FieldName" + case maxValue = "MaxValue" + case minValue = "MinValue" + } + } + public struct NumberReference: AWSDecodableShape { /// Identifier of the number reference. public let name: String? @@ -15278,6 +15420,65 @@ extension Connect { } } + public struct SearchAgentStatusesRequest: AWSEncodableShape { + /// The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. + public let instanceId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The search criteria to be used to return agent statuses. + public let searchCriteria: AgentStatusSearchCriteria? + /// Filters to be applied to search results. + public let searchFilter: AgentStatusSearchFilter? + + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil, searchCriteria: AgentStatusSearchCriteria? = nil, searchFilter: AgentStatusSearchFilter? 
= nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + self.searchCriteria = searchCriteria + self.searchFilter = searchFilter + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2500) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "InstanceId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchCriteria = "SearchCriteria" + case searchFilter = "SearchFilter" + } + } + + public struct SearchAgentStatusesResponse: AWSDecodableShape { + /// The search criteria to be used to return agent statuses. + public let agentStatuses: [AgentStatus]? + /// The total number of agent statuses which matched your search query. + public let approximateTotalCount: Int64? + /// If there are additional results, this is the token for the next set of results. + public let nextToken: String? + + public init(agentStatuses: [AgentStatus]? = nil, approximateTotalCount: Int64? = nil, nextToken: String? = nil) { + self.agentStatuses = agentStatuses + self.approximateTotalCount = approximateTotalCount + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case agentStatuses = "AgentStatuses" + case approximateTotalCount = "ApproximateTotalCount" + case nextToken = "NextToken" + } + } + public struct SearchAvailablePhoneNumbersRequest: AWSEncodableShape { /// The identifier of the Amazon Connect instance that phone numbers are claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. You must enter InstanceId or TargetArn. public let instanceId: String? @@ -15897,7 +16098,7 @@ extension Connect { public let maxResults: Int? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. public let nextToken: String? - /// The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource types. + /// The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource types. Supported resource types AGENT ROUTING_PROFILE STANDARD_QUEUE SECURITY_PROFILE OPERATING_HOURS PROMPT CONTACT_FLOW FLOW_MODULE public let resourceTypes: [String]? /// The search criteria to be used to return tags. public let searchCriteria: ResourceTagsSearchCriteria? @@ -16067,6 +16268,65 @@ extension Connect { } } + public struct SearchUserHierarchyGroupsRequest: AWSEncodableShape { + /// The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. + public let instanceId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The search criteria to be used to return UserHierarchyGroups. 
+ public let searchCriteria: UserHierarchyGroupSearchCriteria? + /// Filters to be applied to search results. + public let searchFilter: UserHierarchyGroupSearchFilter? + + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil, searchCriteria: UserHierarchyGroupSearchCriteria? = nil, searchFilter: UserHierarchyGroupSearchFilter? = nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + self.searchCriteria = searchCriteria + self.searchFilter = searchFilter + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2500) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "InstanceId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchCriteria = "SearchCriteria" + case searchFilter = "SearchFilter" + } + } + + public struct SearchUserHierarchyGroupsResponse: AWSDecodableShape { + /// The total number of userHierarchyGroups which matched your search query. + public let approximateTotalCount: Int64? + /// If there are additional results, this is the token for the next set of results. + public let nextToken: String? + /// Information about the userHierarchyGroups. + public let userHierarchyGroups: [HierarchyGroup]? + + public init(approximateTotalCount: Int64? = nil, nextToken: String? = nil, userHierarchyGroups: [HierarchyGroup]? = nil) { + self.approximateTotalCount = approximateTotalCount + self.nextToken = nextToken + self.userHierarchyGroups = userHierarchyGroups + } + + private enum CodingKeys: String, CodingKey { + case approximateTotalCount = "ApproximateTotalCount" + case nextToken = "NextToken" + case userHierarchyGroups = "UserHierarchyGroups" + } + } + public struct SearchUsersRequest: AWSEncodableShape { /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. InstanceID is a required field. The "Required: No" below is incorrect. 
public let instanceId: String @@ -16681,7 +16941,7 @@ extension Connect { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -17648,7 +17908,7 @@ extension Connect { try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 50) @@ -18243,7 +18503,7 @@ extension Connect { try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") } try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) @@ -20621,6 +20881,40 @@ extension Connect { } } + public struct UserHierarchyGroupSearchCriteria: AWSEncodableShape { + /// A list of conditions which would be applied together with an AND condition. + public let andConditions: [UserHierarchyGroupSearchCriteria]? + /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [UserHierarchyGroupSearchCriteria]? + /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name,
 parentId, levelId, and resourceID. + public let stringCondition: StringCondition? + + public init(andConditions: [UserHierarchyGroupSearchCriteria]? = nil, orConditions: [UserHierarchyGroupSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + self.andConditions = andConditions + self.orConditions = orConditions + self.stringCondition = stringCondition + } + + private enum CodingKeys: String, CodingKey { + case andConditions = "AndConditions" + case orConditions = "OrConditions" + case stringCondition = "StringCondition" + } + } + + public struct UserHierarchyGroupSearchFilter: AWSEncodableShape { + /// An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR or AND (List of List) input where: The top level list specifies conditions that need to be applied with OR operator. The inner list specifies conditions that need to be applied with AND operator. + public let attributeFilter: ControlPlaneAttributeFilter? + + public init(attributeFilter: ControlPlaneAttributeFilter? = nil) { + self.attributeFilter = attributeFilter + } + + private enum CodingKeys: String, CodingKey { + case attributeFilter = "AttributeFilter" + } + } + public struct UserIdentityInfo: AWSEncodableShape & AWSDecodableShape { /// The email address. If you are using SAML for identity management and include this parameter, an error is returned. public let email: String? @@ -20802,14 +21096,17 @@ extension Connect { public let andConditions: [UserSearchCriteria]? /// A leaf node condition which can be used to specify a hierarchy group condition. public let hierarchyGroupCondition: HierarchyGroupCondition? + /// A leaf node condition which can be used to specify a List condition to search users with attributes included in Lists like Proficiencies. + public let listCondition: ListCondition? /// A list of conditions which would be applied together with an OR condition. public let orConditions: [UserSearchCriteria]? /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are Username, FirstName, LastName, RoutingProfileId, SecurityProfileId, ResourceId. public let stringCondition: StringCondition? - public init(andConditions: [UserSearchCriteria]? = nil, hierarchyGroupCondition: HierarchyGroupCondition? = nil, orConditions: [UserSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + public init(andConditions: [UserSearchCriteria]? = nil, hierarchyGroupCondition: HierarchyGroupCondition? = nil, listCondition: ListCondition? = nil, orConditions: [UserSearchCriteria]? = nil, stringCondition: StringCondition? 
= nil) { self.andConditions = andConditions self.hierarchyGroupCondition = hierarchyGroupCondition + self.listCondition = listCondition self.orConditions = orConditions self.stringCondition = stringCondition } @@ -20817,6 +21114,7 @@ extension Connect { private enum CodingKeys: String, CodingKey { case andConditions = "AndConditions" case hierarchyGroupCondition = "HierarchyGroupCondition" + case listCondition = "ListCondition" case orConditions = "OrConditions" case stringCondition = "StringCondition" } diff --git a/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift b/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift index b4d334b940..19a3e178e5 100644 --- a/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift +++ b/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift @@ -60,7 +60,6 @@ public struct DirectConnect: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2012-10-25", endpoint: endpoint, - serviceEndpoints: Self.serviceEndpoints, variantEndpoints: Self.variantEndpoints, errorType: DirectConnectErrorType.self, xmlNamespace: "http://directconnect.amazonaws.com/doc/2012-10-25/", @@ -72,11 +71,6 @@ public struct DirectConnect: AWSService { } - /// custom endpoints for regions - static var serviceEndpoints: [String: String] {[ - "us-gov-east-1": "directconnect.us-gov-east-1.amazonaws.com", - "us-gov-west-1": "directconnect.us-gov-west-1.amazonaws.com" - ]} /// FIPS and dualstack endpoints @@ -86,6 +80,8 @@ public struct DirectConnect: AWSService { "ca-west-1": "directconnect-fips.ca-west-1.amazonaws.com", "us-east-1": "directconnect-fips.us-east-1.amazonaws.com", "us-east-2": "directconnect-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "directconnect-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "directconnect-fips.us-gov-west-1.amazonaws.com", "us-west-1": "directconnect-fips.us-west-1.amazonaws.com", "us-west-2": "directconnect-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index bd1bb22c73..01841fcaa2 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -1142,6 +1142,19 @@ public struct EC2: AWSService { ) } + /// Create a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP). + @Sendable + public func createIpamExternalResourceVerificationToken(_ input: CreateIpamExternalResourceVerificationTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIpamExternalResourceVerificationTokenResult { + return try await self.client.execute( + operation: "CreateIpamExternalResourceVerificationToken", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Create an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each. For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. 
@Sendable public func createIpamPool(_ input: CreateIpamPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIpamPoolResult { @@ -2158,6 +2171,19 @@ public struct EC2: AWSService { ) } + /// Delete a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP). + @Sendable + public func deleteIpamExternalResourceVerificationToken(_ input: DeleteIpamExternalResourceVerificationTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteIpamExternalResourceVerificationTokenResult { + return try await self.client.execute( + operation: "DeleteIpamExternalResourceVerificationToken", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete an IPAM pool. You cannot delete an IPAM pool if there are allocations in it or CIDRs provisioned to it. To release allocations, see ReleaseIpamPoolAllocation. To deprovision pool CIDRs, see DeprovisionIpamPoolCidr. For more information, see Delete a pool in the Amazon VPC IPAM User Guide. @Sendable public func deleteIpamPool(_ input: DeleteIpamPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteIpamPoolResult { @@ -3788,6 +3814,19 @@ public struct EC2: AWSService { ) } + /// Describe verification tokens. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP). + @Sendable + public func describeIpamExternalResourceVerificationTokens(_ input: DescribeIpamExternalResourceVerificationTokensRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeIpamExternalResourceVerificationTokensResult { + return try await self.client.execute( + operation: "DescribeIpamExternalResourceVerificationTokens", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get information about your IPAM pools. @Sendable public func describeIpamPools(_ input: DescribeIpamPoolsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeIpamPoolsResult { @@ -4154,7 +4193,7 @@ public struct EC2: AWSService { ) } - /// Describes the specified placement groups or all of your placement groups. For more information, see Placement groups in the Amazon EC2 User Guide. + /// Describes the specified placement groups or all of your placement groups. To describe a specific placement group that is shared with your account, you must specify the ID of the placement group using the GroupId parameter. Specifying the name of a shared placement group using the GroupNames parameter will result in an error. For more information, see Placement groups in the Amazon EC2 User Guide. @Sendable public func describePlacementGroups(_ input: DescribePlacementGroupsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePlacementGroupsResult { return try await self.client.execute( @@ -4833,7 +4872,7 @@ public struct EC2: AWSService { ) } - /// Describes the most recent volume modification request for the specified EBS volumes. 
If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. + /// Describes the most recent volume modification request for the specified EBS volumes. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. @Sendable public func describeVolumesModifications(_ input: DescribeVolumesModificationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeVolumesModificationsResult { return try await self.client.execute( @@ -5890,7 +5929,7 @@ public struct EC2: AWSService { ) } - /// Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors. By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available. You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor. For more information, see Instance console output in the Amazon EC2 User Guide. + /// Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors. For more information, see Instance console output in the Amazon EC2 User Guide. @Sendable public func getConsoleOutput(_ input: GetConsoleOutputRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConsoleOutputResult { return try await self.client.execute( @@ -10390,7 +10429,7 @@ extension EC2 { ) } - /// Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. + /// Describes the most recent volume modification request for the specified EBS volumes. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. /// Return PaginatorSequence for operation. 
/// /// - Parameters: diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index b6601f52a3..d25fa481f0 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -1968,6 +1968,16 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum IpamExternalResourceVerificationTokenState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createComplete = "create-complete" + case createFailed = "create-failed" + case createInProgress = "create-in-progress" + case deleteComplete = "delete-complete" + case deleteFailed = "delete-failed" + case deleteInProgress = "delete-in-progress" + public var description: String { return self.rawValue } + } + public enum IpamManagementState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ignored = "ignored" case managed = "managed" @@ -1975,6 +1985,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum IpamNetworkInterfaceAttachmentStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case available = "available" + case inUse = "in-use" + public var description: String { return self.rawValue } + } + public enum IpamOverlapStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ignored = "ignored" case nonoverlapping = "nonoverlapping" @@ -2664,6 +2680,7 @@ extension EC2 { case instanceEventWindow = "instance-event-window" case internetGateway = "internet-gateway" case ipam = "ipam" + case ipamExternalResourceVerificationToken = "ipam-external-resource-verification-token" case ipamPool = "ipam-pool" case ipamResourceDiscovery = "ipam-resource-discovery" case ipamResourceDiscoveryAssociation = "ipam-resource-discovery-association" @@ -3001,6 +3018,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum TokenState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case expired = "expired" + case valid = "valid" + public var description: String { return self.rawValue } + } + public enum TpmSupportValues: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case v20 = "v2.0" public var description: String { return self.rawValue } @@ -3233,6 +3256,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum VerificationMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dnsToken = "dns-token" + case remarksX509 = "remarks-x509" + public var description: String { return self.rawValue } + } + public enum VerifiedAccessEndpointAttachmentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case vpc = "vpc" public var description: String { return self.rawValue } @@ -9920,6 +9949,47 @@ extension EC2 { } } + public struct CreateIpamExternalResourceVerificationTokenRequest: AWSEncodableShape { + public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. + public let clientToken: String? + /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the IPAM that will create the token. + public let ipamId: String? + /// Token tags. + @OptionalCustomCoding<ArrayCoder<_TagSpecificationsEncoding, TagSpecification>> + public var tagSpecifications: [TagSpecification]? + + public init(clientToken: String? = CreateIpamExternalResourceVerificationTokenRequest.idempotencyToken(), dryRun: Bool? = nil, ipamId: String? = nil, tagSpecifications: [TagSpecification]? = nil) { + self.clientToken = clientToken + self.dryRun = dryRun + self.ipamId = ipamId + self.tagSpecifications = tagSpecifications + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case dryRun = "DryRun" + case ipamId = "IpamId" + case tagSpecifications = "TagSpecification" + } + } + + public struct CreateIpamExternalResourceVerificationTokenResult: AWSDecodableShape { + /// The verification token. + public let ipamExternalResourceVerificationToken: IpamExternalResourceVerificationToken? + + public init(ipamExternalResourceVerificationToken: IpamExternalResourceVerificationToken? = nil) { + self.ipamExternalResourceVerificationToken = ipamExternalResourceVerificationToken + } + + private enum CodingKeys: String, CodingKey { + case ipamExternalResourceVerificationToken = "ipamExternalResourceVerificationToken" + } + } + public struct CreateIpamPoolRequest: AWSEncodableShape { public struct _AllocationResourceTagsEncoding: ArrayCoderProperties { public static let member = "item" } public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } @@ -9947,7 +10017,7 @@ extension EC2 { public let dryRun: Bool? /// The ID of the scope in which you would like to create the IPAM pool. public let ipamScopeId: String? - /// In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region, such as us-east-1. + /// The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. public let locale: String? /// The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool if PublicIpSource is amazon. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide. public let publicIpSource: IpamPoolPublicIpSource?
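The new CreateIpamExternalResourceVerificationToken shapes above can be exercised as sketched below. This is an illustration only: the EC2 client method name is assumed to follow Soto's usual generated convention (createIpamExternalResourceVerificationToken), and the IPAM ID and tag values are placeholders.

import Foundation
import SotoEC2

func createVerificationToken() async throws {
    let awsClient = AWSClient()                         // default configuration; adjust as needed
    let ec2 = EC2(client: awsClient, region: .useast1)

    let request = CreateIpamExternalResourceVerificationTokenRequest(
        ipamId: "ipam-0123456789abcdef0",               // placeholder IPAM ID
        tagSpecifications: [
            TagSpecification(
                resourceType: .ipamExternalResourceVerificationToken,
                tags: [Tag(key: "Name", value: "byoip-verification")]
            )
        ]
    )
    // Assumed generated client method for the new action.
    let response = try await ec2.createIpamExternalResourceVerificationToken(request)

    // The token name/value pair is what you later use (for example, in a DNS TXT record)
    // when proving control of an external resource such as a BYOIP range.
    if let token = response.ipamExternalResourceVerificationToken {
        print("name:", token.tokenName ?? "-", "value:", token.tokenValue ?? "-")
        if let notAfter = token.notAfter { print("expires:", notAfter) }
    }
    try await awsClient.shutdown()
}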
@@ -11081,17 +11151,21 @@ extension EC2 { /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? + /// The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide. + public let networkBorderGroup: String? /// The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. @OptionalCustomCoding<ArrayCoder<_TagSpecificationsEncoding, TagSpecification>> public var tagSpecifications: [TagSpecification]? - public init(dryRun: Bool? = nil, tagSpecifications: [TagSpecification]? = nil) { + public init(dryRun: Bool? = nil, networkBorderGroup: String? = nil, tagSpecifications: [TagSpecification]? = nil) { self.dryRun = dryRun + self.networkBorderGroup = networkBorderGroup self.tagSpecifications = tagSpecifications } private enum CodingKeys: String, CodingKey { case dryRun = "DryRun" + case networkBorderGroup = "NetworkBorderGroup" case tagSpecifications = "TagSpecification" } } @@ -14106,6 +14180,36 @@ extension EC2 { } } + public struct DeleteIpamExternalResourceVerificationTokenRequest: AWSEncodableShape { + /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The token ID. + public let ipamExternalResourceVerificationTokenId: String? + + public init(dryRun: Bool? = nil, ipamExternalResourceVerificationTokenId: String? = nil) { + self.dryRun = dryRun + self.ipamExternalResourceVerificationTokenId = ipamExternalResourceVerificationTokenId + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case ipamExternalResourceVerificationTokenId = "IpamExternalResourceVerificationTokenId" + } + } + + public struct DeleteIpamExternalResourceVerificationTokenResult: AWSDecodableShape { + /// The verification token. + public let ipamExternalResourceVerificationToken: IpamExternalResourceVerificationToken? + + public init(ipamExternalResourceVerificationToken: IpamExternalResourceVerificationToken? = nil) { + self.ipamExternalResourceVerificationToken = ipamExternalResourceVerificationToken + } + + private enum CodingKeys: String, CodingKey { + case ipamExternalResourceVerificationToken = "ipamExternalResourceVerificationToken" + } + } + public struct DeleteIpamPoolRequest: AWSEncodableShape { /// Enables you to quickly delete an IPAM pool and all resources within that pool, including provisioned CIDRs, allocations, and other pools. You can only use this option to delete pools in the private scope or pools in the public scope with a source resource. A source resource is a resource used to provision CIDRs to a resource planning pool. public let cascade: Bool?
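Correspondingly, a token that is no longer needed can be removed with the DeleteIpamExternalResourceVerificationToken shapes above. A minimal sketch, assuming the matching generated client method and a placeholder token ID:

import SotoEC2

func deleteVerificationToken(tokenId: String) async throws {
    let awsClient = AWSClient()
    let ec2 = EC2(client: awsClient, region: .useast1)
    // Assumed generated client method; the result carries the deleted token so its
    // final state (delete-in-progress / delete-complete) can be inspected.
    let response = try await ec2.deleteIpamExternalResourceVerificationToken(
        DeleteIpamExternalResourceVerificationTokenRequest(ipamExternalResourceVerificationTokenId: tokenId)
    )
    print(response.ipamExternalResourceVerificationToken?.state?.rawValue ?? "unknown")
    try await awsClient.shutdown()
}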
@@ -14838,16 +14942,20 @@ extension EC2 { public struct DeletePublicIpv4PoolRequest: AWSEncodableShape { /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? + /// The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide. + public let networkBorderGroup: String? /// The ID of the public IPv4 pool you want to delete. public let poolId: String? - public init(dryRun: Bool? = nil, poolId: String? = nil) { + public init(dryRun: Bool? = nil, networkBorderGroup: String? = nil, poolId: String? = nil) { self.dryRun = dryRun + self.networkBorderGroup = networkBorderGroup self.poolId = poolId } private enum CodingKeys: String, CodingKey { case dryRun = "DryRun" + case networkBorderGroup = "NetworkBorderGroup" case poolId = "PoolId" } } @@ -19385,6 +19493,65 @@ extension EC2 { } } + public struct DescribeIpamExternalResourceVerificationTokensRequest: AWSEncodableShape { + public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } + public struct _IpamExternalResourceVerificationTokenIdsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// One or more filters for the request. For more information about filtering, see Filtering CLI output. Available filters: ipam-arn ipam-external-resource-verification-token-arn ipam-external-resource-verification-token-id ipam-id ipam-region state status token-name token-value + @OptionalCustomCoding<ArrayCoder<_FiltersEncoding, Filter>> + public var filters: [Filter]? + /// Verification token IDs. + @OptionalCustomCoding<ArrayCoder<_IpamExternalResourceVerificationTokenIdsEncoding, String>> + public var ipamExternalResourceVerificationTokenIds: [String]? + /// The maximum number of tokens to return in one page of results. + public let maxResults: Int? + /// The token for the next page of results. + public let nextToken: String? + + public init(dryRun: Bool? = nil, filters: [Filter]? = nil, ipamExternalResourceVerificationTokenIds: [String]? = nil, maxResults: Int? = nil, nextToken: String?
= nil) { + self.dryRun = dryRun + self.filters = filters + self.ipamExternalResourceVerificationTokenIds = ipamExternalResourceVerificationTokenIds + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 5) + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case filters = "Filter" + case ipamExternalResourceVerificationTokenIds = "IpamExternalResourceVerificationTokenId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct DescribeIpamExternalResourceVerificationTokensResult: AWSDecodableShape { + public struct _IpamExternalResourceVerificationTokensEncoding: ArrayCoderProperties { public static let member = "item" } + + /// Verification tokens. + @OptionalCustomCoding<ArrayCoder<_IpamExternalResourceVerificationTokensEncoding, IpamExternalResourceVerificationToken>> + public var ipamExternalResourceVerificationTokens: [IpamExternalResourceVerificationToken]? + /// The token to use to retrieve the next page of results. This value is null when there are no more results to return. + public let nextToken: String? + + public init(ipamExternalResourceVerificationTokens: [IpamExternalResourceVerificationToken]? = nil, nextToken: String? = nil) { + self.ipamExternalResourceVerificationTokens = ipamExternalResourceVerificationTokens + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case ipamExternalResourceVerificationTokens = "ipamExternalResourceVerificationTokenSet" + case nextToken = "nextToken" + } + } + public struct DescribeIpamPoolsRequest: AWSEncodableShape { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _IpamPoolIdsEncoding: ArrayCoderProperties { public static let member = "item" } @@ -21135,7 +21302,7 @@ extension EC2 { /// The IDs of the placement groups. @OptionalCustomCoding<ArrayCoder<_GroupIdsEncoding, String>> public var groupIds: [String]? - /// The names of the placement groups. Default: Describes all your placement groups, or only those otherwise specified. + /// The names of the placement groups. Constraints: You can specify a name only if the placement group is owned by your account. If a placement group is shared with your account, specifying the name results in an error. You must use the GroupId parameter instead. @OptionalCustomCoding<ArrayCoder<_GroupNamesEncoding, String>> public var groupNames: [String]? @@ -28841,7 +29008,7 @@ extension EC2 { public let priority: Double? /// The ID of the subnet in which to launch the instances. public let subnetId: String? - /// The number of units provided by the specified instance type. When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. + /// The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O. If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.
When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. public let weightedCapacity: Double? public init(availabilityZone: String? = nil, imageId: String? = nil, instanceRequirements: InstanceRequirements? = nil, instanceType: InstanceType? = nil, maxPrice: String? = nil, placement: PlacementResponse? = nil, priority: Double? = nil, subnetId: String? = nil, weightedCapacity: Double? = nil) { @@ -28886,7 +29053,7 @@ extension EC2 { public let priority: Double? /// The IDs of the subnets in which to launch the instances. Separate multiple subnet IDs using commas (for example, subnet-1234abcdeexample1, subnet-0987cdef6example2). A request of type instant can have only one subnet ID. public let subnetId: String? - /// The number of units provided by the specified instance type. When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. + /// The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O. If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1. When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. public let weightedCapacity: Double? public init(availabilityZone: String? = nil, imageId: String? = nil, instanceRequirements: InstanceRequirementsRequest? = nil, instanceType: InstanceType? = nil, maxPrice: String? = nil, placement: Placement? = nil, priority: Double? = nil, subnetId: String? = nil, weightedCapacity: Double? = nil) { @@ -35601,7 +35768,7 @@ extension EC2 { public let instanceId: String? /// The resource discovery ID. public let ipamResourceDiscoveryId: String? - /// The network border group that the resource that the IP address is assigned to is in. + /// The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide. public let networkBorderGroup: String? /// The description of the network interface that IP address is assigned to. public let networkInterfaceDescription: String? @@ -35673,10 +35840,14 @@ extension EC2 { public struct IpamDiscoveredResourceCidr: AWSDecodableShape { public struct _ResourceTagsEncoding: ArrayCoderProperties { public static let member = "item" } + /// The Availability Zone ID. + public let availabilityZoneId: String? /// The resource discovery ID. public let ipamResourceDiscoveryId: String? 
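The DescribeIpamExternalResourceVerificationTokens request and result shown a little earlier support filtering, which is useful for confirming a token is still usable before provisioning. A sketch, assuming the generated client method name and using the filter names listed in that request's documentation:

import Foundation
import SotoEC2

func listValidVerificationTokens() async throws {
    let awsClient = AWSClient()
    let ec2 = EC2(client: awsClient, region: .useast1)

    let request = DescribeIpamExternalResourceVerificationTokensRequest(
        filters: [
            Filter(name: "state", values: ["create-complete"]),
            Filter(name: "status", values: ["valid"]),
        ],
        maxResults: 100                                  // must be between 5 and 1000 per the validation above
    )
    // Assumed generated client method for the new action.
    let response = try await ec2.describeIpamExternalResourceVerificationTokens(request)
    for token in response.ipamExternalResourceVerificationTokens ?? [] {
        print(token.ipamExternalResourceVerificationTokenId ?? "-",
              token.tokenName ?? "-",
              "expires:", token.notAfter?.description ?? "unknown")
    }
    try await awsClient.shutdown()
}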
/// The percentage of IP address space in use. To convert the decimal to a percentage, multiply the decimal by 100. Note the following: For resources that are VPCs, this is the percentage of IP address space in the VPC that's taken up by subnet CIDRs. For resources that are subnets, if the subnet has an IPv4 CIDR provisioned to it, this is the percentage of IPv4 address space in the subnet that's in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage of IPv6 address space in use is not represented. The percentage of IPv6 address space in use cannot currently be calculated. For resources that are public IPv4 pools, this is the percentage of IP address space in the pool that's been allocated to Elastic IP addresses (EIPs). public let ipUsage: Double? + /// For elastic network interfaces, this is the status of whether or not the elastic network interface is attached. + public let networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? /// The resource CIDR. public let resourceCidr: String? /// The resource ID. @@ -35695,9 +35866,11 @@ extension EC2 { /// The VPC ID. public let vpcId: String? - public init(ipamResourceDiscoveryId: String? = nil, ipUsage: Double? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, vpcId: String? = nil) { + public init(availabilityZoneId: String? = nil, ipamResourceDiscoveryId: String? = nil, ipUsage: Double? = nil, networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, vpcId: String? = nil) { + self.availabilityZoneId = availabilityZoneId self.ipamResourceDiscoveryId = ipamResourceDiscoveryId self.ipUsage = ipUsage + self.networkInterfaceAttachmentStatus = networkInterfaceAttachmentStatus self.resourceCidr = resourceCidr self.resourceId = resourceId self.resourceOwnerId = resourceOwnerId @@ -35709,8 +35882,10 @@ extension EC2 { } private enum CodingKeys: String, CodingKey { + case availabilityZoneId = "availabilityZoneId" case ipamResourceDiscoveryId = "ipamResourceDiscoveryId" case ipUsage = "ipUsage" + case networkInterfaceAttachmentStatus = "networkInterfaceAttachmentStatus" case resourceCidr = "resourceCidr" case resourceId = "resourceId" case resourceOwnerId = "resourceOwnerId" @@ -35739,6 +35914,62 @@ extension EC2 { } } + public struct IpamExternalResourceVerificationToken: AWSDecodableShape { + public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// ARN of the IPAM that created the token. + public let ipamArn: String? + /// Token ARN. + public let ipamExternalResourceVerificationTokenArn: String? + /// The ID of the token. + public let ipamExternalResourceVerificationTokenId: String? + /// The ID of the IPAM that created the token. + public let ipamId: String? + /// Region of the IPAM that created the token. + public let ipamRegion: String? + /// Token expiration. + public let notAfter: Date? + /// Token state. + public let state: IpamExternalResourceVerificationTokenState? + /// Token status. + public let status: TokenState? + /// Token tags. + @OptionalCustomCoding<ArrayCoder<_TagsEncoding, Tag>> + public var tags: [Tag]? + /// Token name. + public let tokenName: String? + /// Token value.
+ public let tokenValue: String? + + public init(ipamArn: String? = nil, ipamExternalResourceVerificationTokenArn: String? = nil, ipamExternalResourceVerificationTokenId: String? = nil, ipamId: String? = nil, ipamRegion: String? = nil, notAfter: Date? = nil, state: IpamExternalResourceVerificationTokenState? = nil, status: TokenState? = nil, tags: [Tag]? = nil, tokenName: String? = nil, tokenValue: String? = nil) { + self.ipamArn = ipamArn + self.ipamExternalResourceVerificationTokenArn = ipamExternalResourceVerificationTokenArn + self.ipamExternalResourceVerificationTokenId = ipamExternalResourceVerificationTokenId + self.ipamId = ipamId + self.ipamRegion = ipamRegion + self.notAfter = notAfter + self.state = state + self.status = status + self.tags = tags + self.tokenName = tokenName + self.tokenValue = tokenValue + } + + private enum CodingKeys: String, CodingKey { + case ipamArn = "ipamArn" + case ipamExternalResourceVerificationTokenArn = "ipamExternalResourceVerificationTokenArn" + case ipamExternalResourceVerificationTokenId = "ipamExternalResourceVerificationTokenId" + case ipamId = "ipamId" + case ipamRegion = "ipamRegion" + case notAfter = "notAfter" + case state = "state" + case status = "status" + case tags = "tagSet" + case tokenName = "tokenName" + case tokenValue = "tokenValue" + } + } + public struct IpamOperatingRegion: AWSDecodableShape { /// The name of the operating Region. public let regionName: String? @@ -35785,7 +36016,7 @@ extension EC2 { public let ipamScopeArn: String? /// In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict. public let ipamScopeType: IpamScopeType? - /// The locale of the IPAM pool. In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error. + /// The locale of the IPAM pool. The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error. public let locale: String? /// The Amazon Web Services account ID of the owner of the IPAM pool. public let ownerId: String? @@ -36048,6 +36279,8 @@ extension EC2 { public struct IpamResourceCidr: AWSDecodableShape { public struct _ResourceTagsEncoding: ArrayCoderProperties { public static let member = "item" } + /// The Availability Zone ID. + public let availabilityZoneId: String? /// The compliance status of the IPAM resource. 
For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide. public let complianceStatus: IpamComplianceStatus? /// The IPAM ID for an IPAM resource. @@ -36080,7 +36313,8 @@ extension EC2 { /// The ID of a VPC. public let vpcId: String? - public init(complianceStatus: IpamComplianceStatus? = nil, ipamId: String? = nil, ipamPoolId: String? = nil, ipamScopeId: String? = nil, ipUsage: Double? = nil, managementState: IpamManagementState? = nil, overlapStatus: IpamOverlapStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceName: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, vpcId: String? = nil) { + public init(availabilityZoneId: String? = nil, complianceStatus: IpamComplianceStatus? = nil, ipamId: String? = nil, ipamPoolId: String? = nil, ipamScopeId: String? = nil, ipUsage: Double? = nil, managementState: IpamManagementState? = nil, overlapStatus: IpamOverlapStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceName: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, vpcId: String? = nil) { + self.availabilityZoneId = availabilityZoneId self.complianceStatus = complianceStatus self.ipamId = ipamId self.ipamPoolId = ipamPoolId @@ -36099,6 +36333,7 @@ extension EC2 { } private enum CodingKeys: String, CodingKey { + case availabilityZoneId = "availabilityZoneId" case complianceStatus = "complianceStatus" case ipamId = "ipamId" case ipamPoolId = "ipamPoolId" @@ -37490,7 +37725,7 @@ extension EC2 { public let spotPrice: String? /// The ID of the subnet in which to launch the instances. public let subnetId: String? - /// The number of units provided by the specified instance type. When specifying weights, the price used in the lowest-price and price-capacity-optimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. + /// The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O. If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1. When specifying weights, the price used in the lowestPrice and priceCapacityOptimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. public let weightedCapacity: Double? public init(availabilityZone: String? = nil, instanceRequirements: InstanceRequirements? = nil, instanceType: InstanceType? = nil, priority: Double? = nil, spotPrice: String? = nil, subnetId: String? = nil, weightedCapacity: Double? = nil) { @@ -44598,24 +44833,30 @@ extension EC2 { public struct ProvisionIpamPoolCidrRequest: AWSEncodableShape { /// The CIDR you want to assign to the IPAM pool. Either "NetmaskLength" or "Cidr" is required. 
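The updated WeightedCapacity documentation above states the rounding rule numerically; a small illustration of the arithmetic (plain Swift, not an API call):

// Instances launched = target capacity / weight, rounded up to the next whole number.
let targetCapacity = 10.0    // units (instances, vCPUs, memory, ...)
let weightedCapacity = 3.0   // units provided per instance of this type
let instancesLaunched = Int((targetCapacity / weightedCapacity).rounded(.up))
// 10 / 3 = 3.33..., so 4 instances are launched, fulfilling 12 units of capacity.
print(instancesLaunched)     // 4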
This value will be null if you specify "NetmaskLength" and will be filled in during the provisioning process. public let cidr: String? - /// A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option applies to public pools only. + /// A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 and IPv6 pools in the public scope. public let cidrAuthorizationContext: IpamCidrAuthorizationContext? /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? + /// Verification token ID. This option only applies to IPv4 and IPv6 pools in the public scope. + public let ipamExternalResourceVerificationTokenId: String? /// The ID of the IPAM pool to which you want to assign a CIDR. public let ipamPoolId: String? /// The netmask length of the CIDR you'd like to provision to a pool. Can be used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for provisioning CIDRs to pools with source pools. Cannot be used to provision BYOIP CIDRs to top-level pools. Either "NetmaskLength" or "Cidr" is required. public let netmaskLength: Int? + /// The method for verifying control of a public IP address range. Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools in the public scope. + public let verificationMethod: VerificationMethod? - public init(cidr: String? = nil, cidrAuthorizationContext: IpamCidrAuthorizationContext? = nil, clientToken: String? = ProvisionIpamPoolCidrRequest.idempotencyToken(), dryRun: Bool? = nil, ipamPoolId: String? = nil, netmaskLength: Int? = nil) { + public init(cidr: String? = nil, cidrAuthorizationContext: IpamCidrAuthorizationContext? = nil, clientToken: String? = ProvisionIpamPoolCidrRequest.idempotencyToken(), dryRun: Bool? = nil, ipamExternalResourceVerificationTokenId: String? = nil, ipamPoolId: String? = nil, netmaskLength: Int? = nil, verificationMethod: VerificationMethod? = nil) { self.cidr = cidr self.cidrAuthorizationContext = cidrAuthorizationContext self.clientToken = clientToken self.dryRun = dryRun + self.ipamExternalResourceVerificationTokenId = ipamExternalResourceVerificationTokenId self.ipamPoolId = ipamPoolId self.netmaskLength = netmaskLength + self.verificationMethod = verificationMethod } private enum CodingKeys: String, CodingKey { @@ -44623,8 +44864,10 @@ extension EC2 { case cidrAuthorizationContext = "CidrAuthorizationContext" case clientToken = "ClientToken" case dryRun = "DryRun" + case ipamExternalResourceVerificationTokenId = "IpamExternalResourceVerificationTokenId" case ipamPoolId = "IpamPoolId" case netmaskLength = "NetmaskLength" + case verificationMethod = "VerificationMethod" } } @@ -44648,13 +44891,16 @@ extension EC2 { public let ipamPoolId: String? /// The netmask length of the CIDR you would like to allocate to the public IPv4 pool. public let netmaskLength: Int? + /// The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. 
For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide. + public let networkBorderGroup: String? /// The ID of the public IPv4 pool you would like to use for this CIDR. public let poolId: String? - public init(dryRun: Bool? = nil, ipamPoolId: String? = nil, netmaskLength: Int? = nil, poolId: String? = nil) { + public init(dryRun: Bool? = nil, ipamPoolId: String? = nil, netmaskLength: Int? = nil, networkBorderGroup: String? = nil, poolId: String? = nil) { self.dryRun = dryRun self.ipamPoolId = ipamPoolId self.netmaskLength = netmaskLength + self.networkBorderGroup = networkBorderGroup self.poolId = poolId } @@ -44662,6 +44908,7 @@ extension EC2 { case dryRun = "DryRun" case ipamPoolId = "IpamPoolId" case netmaskLength = "NetmaskLength" + case networkBorderGroup = "NetworkBorderGroup" case poolId = "PoolId" } } @@ -49872,7 +50119,7 @@ extension EC2 { public var tagSpecifications: [SpotFleetTagSpecification]? /// The base64-encoded user data that instances use when starting up. User data is limited to 16 KB. public let userData: String? - /// The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O. If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1. + /// The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O. If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1. When specifying weights, the price used in the lowestPrice and priceCapacityOptimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour. public let weightedCapacity: Double? public init(addressingType: String? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, ebsOptimized: Bool? = nil, iamInstanceProfile: IamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceRequirements: InstanceRequirements? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, monitoring: SpotFleetMonitoring? = nil, networkInterfaces: [InstanceNetworkInterfaceSpecification]? = nil, placement: SpotPlacement? = nil, ramdiskId: String? = nil, securityGroups: [GroupIdentifier]? = nil, spotPrice: String? = nil, subnetId: String? = nil, tagSpecifications: [SpotFleetTagSpecification]? = nil, userData: String? = nil, weightedCapacity: Double? = nil) { @@ -54505,7 +54752,7 @@ extension EC2 { public struct VolumeModification: AWSDecodableShape { /// The modification completion or failure time. public let endTime: Date? - /// The current modification state. The modification state is null for unmodified volumes. + /// The current modification state. public let modificationState: VolumeModificationState? /// The original IOPS rate of the volume. public let originalIops: Int? 
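The new IpamExternalResourceVerificationTokenId and VerificationMethod parameters above tie the verification token into BYOIP provisioning. A sketch, assuming the existing provisionIpamPoolCidr client method; the CIDR, pool ID, and token ID are placeholders:

import SotoEC2

func provisionByoipCidr() async throws {
    let awsClient = AWSClient()
    let ec2 = EC2(client: awsClient, region: .useast1)

    // Provision a public CIDR to an IPAM pool in the public scope, proving control of the
    // range via the DNS token created earlier rather than a signed X.509 message.
    let request = ProvisionIpamPoolCidrRequest(
        cidr: "203.0.113.0/24",                                                   // placeholder CIDR
        ipamExternalResourceVerificationTokenId: "ipam-ext-res-ver-token-0123456789abcdef0",  // placeholder ID
        ipamPoolId: "ipam-pool-0123456789abcdef0",                                // placeholder ID
        verificationMethod: .dnsToken
    )
    let response = try await ec2.provisionIpamPoolCidr(request)
    print(response.ipamPoolCidr?.state?.rawValue ?? "unknown")
    try await awsClient.shutdown()
}

The same pattern applies to provisionPublicIpv4PoolCidr, where the new NetworkBorderGroup parameter shown above targets a specific Local Zone network border group.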
diff --git a/Sources/Soto/Services/FSx/FSx_shapes.swift b/Sources/Soto/Services/FSx/FSx_shapes.swift index 6e1b13a327..c9e5f8222b 100644 --- a/Sources/Soto/Services/FSx/FSx_shapes.swift +++ b/Sources/Soto/Services/FSx/FSx_shapes.swift @@ -27,6 +27,7 @@ extension FSx { // MARK: Enums public enum AdministrativeActionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case downloadDataFromBackup = "DOWNLOAD_DATA_FROM_BACKUP" case fileSystemAliasAssociation = "FILE_SYSTEM_ALIAS_ASSOCIATION" case fileSystemAliasDisassociation = "FILE_SYSTEM_ALIAS_DISASSOCIATION" case fileSystemUpdate = "FILE_SYSTEM_UPDATE" @@ -257,6 +258,7 @@ extension FSx { public enum OntapDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case multiAz1 = "MULTI_AZ_1" + case multiAz2 = "MULTI_AZ_2" case singleAz1 = "SINGLE_AZ_1" case singleAz2 = "SINGLE_AZ_2" public var description: String { return self.rawValue } @@ -287,6 +289,8 @@ extension FSx { case multiAz1 = "MULTI_AZ_1" case singleAz1 = "SINGLE_AZ_1" case singleAz2 = "SINGLE_AZ_2" + case singleAzHa1 = "SINGLE_AZ_HA_1" + case singleAzHa2 = "SINGLE_AZ_HA_2" public var description: String { return self.rawValue } } @@ -368,6 +372,7 @@ extension FSx { case completed = "COMPLETED" case failed = "FAILED" case inProgress = "IN_PROGRESS" + case optimizing = "OPTIMIZING" case pending = "PENDING" case updatedOptimizing = "UPDATED_OPTIMIZING" public var description: String { return self.rawValue } @@ -498,13 +503,13 @@ extension FSx { public struct AdministrativeAction: AWSDecodableShape { public let administrativeActionType: AdministrativeActionType? public let failureDetails: AdministrativeActionFailureDetails? - /// The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type. + /// The percentage-complete status of a STORAGE_OPTIMIZATION or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any other administrative action type. public let progressPercent: Int? /// The remaining bytes to transfer for the FSx for OpenZFS snapshot that you're copying. public let remainingTransferBytes: Int64? /// The time that the administrative action request was received. public let requestTime: Date? - /// The status of the administrative action, as follows: FAILED - Amazon FSx failed to process the administrative action successfully. IN_PROGRESS - Amazon FSx is processing the administrative action. PENDING - Amazon FSx is waiting to process the administrative action. COMPLETED - Amazon FSx has finished processing the administrative task. UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process. + /// The status of the administrative action, as follows: FAILED - Amazon FSx failed to process the administrative action successfully. IN_PROGRESS - Amazon FSx is processing the administrative action. PENDING - Amazon FSx is waiting to process the administrative action. COMPLETED - Amazon FSx has finished processing the administrative task. For a backup restore to a second-generation FSx for ONTAP file system, indicates that all data has been downloaded to the volume, and clients now have read-write access to volume. UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process. 
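The new DOWNLOAD_DATA_FROM_BACKUP administrative action and its ProgressPercent field can be observed through DescribeFileSystems. A sketch with a placeholder file system ID, using the existing Soto FSx client surface:

import SotoFSx

func reportBackupRestoreProgress() async throws {
    let awsClient = AWSClient()
    let fsx = FSx(client: awsClient, region: .useast1)

    let response = try await fsx.describeFileSystems(
        DescribeFileSystemsRequest(fileSystemIds: ["fs-0123456789abcdef0"])  // placeholder ID
    )
    for fileSystem in response.fileSystems ?? [] {
        for action in fileSystem.administrativeActions ?? []
        where action.administrativeActionType == .downloadDataFromBackup {
            // Per the documentation above: PENDING = metadata download, IN_PROGRESS = file data
            // download with read-only access, COMPLETED = clients have read-write access.
            print(action.status?.rawValue ?? "unknown", action.progressPercent ?? 0)
        }
    }
    try await awsClient.shutdown()
}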
PENDING - For a backup restore to a second-generation FSx for ONTAP file system, indicates that the file metadata is being downloaded onto the volume. The volume's Lifecycle state is CREATING. IN_PROGRESS - For a backup restore to a second-generation FSx for ONTAP file system, indicates that all metadata has been downloaded to the new volume and client can access data with read-only access while Amazon FSx downloads the file data to the volume. Track the progress of this process with the ProgressPercent element. public let status: Status? /// The target value for the administration action, provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions. public let targetFileSystemValues: FileSystem? @@ -554,7 +559,7 @@ extension FSx { } public struct AggregateConfiguration: AWSDecodableShape { - /// The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The strings in the value of Aggregates are not are not formatted as aggrX, where X is a number between 1 and 6. The value of Aggregates contains aggregates that are not present. One or more of the aggregates supplied are too close to the volume limit to support adding more volumes. + /// The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The strings in the value of Aggregates are not are not formatted as aggrX, where X is a number between 1 and 12. The value of Aggregates contains aggregates that are not present. One or more of the aggregates supplied are too close to the volume limit to support adding more volumes. public let aggregates: [String]? /// The total number of constituents this FlexGroup volume has. Not applicable for FlexVols. public let totalConstituents: Int? @@ -1547,7 +1552,7 @@ extension FSx { public struct CreateFileSystemOntapConfiguration: AWSEncodableShape { public let automaticBackupRetentionDays: Int? public let dailyAutomaticBackupStartTime: String? - /// Specifies the FSx for ONTAP file system deployment type to use in creating the file system. MULTI_AZ_1 - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type. + /// Specifies the FSx for ONTAP file system deployment type to use in creating the file system. MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system. 
MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system. SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system. SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type. public let deploymentType: OntapDeploymentType? /// The SSD IOPS configuration for the FSx for ONTAP file system. public let diskIopsConfiguration: DiskIopsConfiguration? @@ -1555,15 +1560,15 @@ extension FSx { public let endpointIpAddressRange: String? /// The ONTAP administrative password for the fsxadmin user with which you administer your file system using the NetApp ONTAP CLI and REST API. public let fsxAdminPassword: String? - /// Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of HAPairs is less than 1 or greater than 12. The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1. + /// Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of HAPairs is less than 1 or greater than 12. The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2. public let haPairs: Int? - /// Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located. + /// Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies the subnet in which you want the preferred file server to be located. public let preferredSubnetId: String? /// (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with Key: AmazonFSx; Value: ManagedByAmazonFSx. 
When creating FSx for ONTAP Multi-AZ file systems using CloudFormation we recommend that you add the Key: AmazonFSx; Value: ManagedByAmazonFSx tag manually. public let routeTableIds: [String]? /// Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see Managing throughput capacity in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value. The value of ThroughputCapacity when divided by the value of HAPairs is outside of the valid range for ThroughputCapacity. public let throughputCapacity: Int? - /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both. This field and ThroughputCapacity are the same for scale-up file systems powered by one HA pair. For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2 file systems, valid values are 3072 or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. + /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both. This field and ThroughputCapacity are the same for file systems powered by one HA pair. For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps. For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. public let throughputCapacityPerHAPair: Int? public let weeklyMaintenanceStartTime: String? @@ -1638,7 +1643,7 @@ extension FSx { /// A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to false. If it's set to true, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value. public let copyTagsToVolumes: Bool? public let dailyAutomaticBackupStartTime: String? - /// Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . 
Valid values are the following: MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions. SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available. SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions. For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide. + /// Specifies the file system deployment type. Valid values are the following: MULTI_AZ_1- Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same Amazon Web Services Region. SINGLE_AZ_HA_2- Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone. SINGLE_AZ_HA_1- Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone. SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone. SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MBs that automatically recover within a single Availability Zone. For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide. public let deploymentType: OpenZFSDeploymentType? public let diskIopsConfiguration: DiskIopsConfiguration? /// (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables. @@ -3853,7 +3858,7 @@ extension FSx { public let dataRepositoryAssociationIds: [String]? /// The Domain Name System (DNS) name for the cache. public let dnsName: String? - /// A structure providing details of any failures that occurred. + /// A structure providing details of any failures that occurred in creating a cache. public let failureDetails: FileCacheFailureDetails? /// The system-generated, unique ID of the cache. public let fileCacheId: String? @@ -4441,7 +4446,7 @@ extension FSx { public struct OntapFileSystemConfiguration: AWSDecodableShape { public let automaticBackupRetentionDays: Int? public let dailyAutomaticBackupStartTime: String? - /// Specifies the FSx for ONTAP file system deployment type in use in the file system. 
MULTI_AZ_1 - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment. + /// Specifies the FSx for ONTAP file system deployment type in use in the file system. MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system. MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system. SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system. SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment. public let deploymentType: OntapDeploymentType? /// The SSD IOPS configuration for the ONTAP file system, specifying the number of provisioned IOPS and the provision mode. public let diskIopsConfiguration: DiskIopsConfiguration? @@ -4451,13 +4456,13 @@ extension FSx { public let endpoints: FileSystemEndpoints? /// You can use the fsxadmin user account to access the NetApp ONTAP CLI and REST API. The password value is always redacted in the response. public let fsxAdminPassword: String? - /// Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of HAPairs is less than 1 or greater than 12. The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1. + /// Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of HAPairs is less than 1 or greater than 12. The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2. public let haPairs: Int? public let preferredSubnetId: String? /// (Multi-AZ only) The VPC route tables in which your file system's endpoints are created. public let routeTableIds: [String]? public let throughputCapacity: Int? - /// Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system. This field and ThroughputCapacity cannot be defined in the same API call, but one is required. This field and ThroughputCapacity are the same for file systems with one HA pair. 
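A second-generation (MULTI_AZ_2) deployment with per-HA-pair throughput, as described above, might be requested as sketched below. Subnet IDs and sizes are placeholders, and the exact set of required parameters should be checked against the FSx documentation; this is not a definitive implementation.

import SotoFSx

func createSecondGenOntapFileSystem() async throws {
    let awsClient = AWSClient()
    let fsx = FSx(client: awsClient, region: .useast1)

    let ontap = CreateFileSystemOntapConfiguration(
        deploymentType: .multiAz2,                  // second-generation Multi-AZ
        haPairs: 1,                                 // second-generation multi-AZ file systems use 1 HA pair
        preferredSubnetId: "subnet-0123456789abcdef0",   // placeholder; required for MULTI_AZ_2
        throughputCapacityPerHAPair: 384            // valid MULTI_AZ_2 values per the doc: 384/768/1536/3072/6144 MBps
    )
    let request = CreateFileSystemRequest(
        fileSystemType: .ontap,
        ontapConfiguration: ontap,
        storageCapacity: 1024,                      // GiB, placeholder size
        subnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"]   // placeholders
    )
    let response = try await fsx.createFileSystem(request)
    print(response.fileSystem?.fileSystemId ?? "unknown")
    try await awsClient.shutdown()
}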
For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2, valid values are 3072 or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. + /// Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system. This field and ThroughputCapacity cannot be defined in the same API call, but one is required. This field and ThroughputCapacity are the same for file systems with one HA pair. For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps. For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. public let throughputCapacityPerHAPair: Int? public let weeklyMaintenanceStartTime: String? @@ -4650,7 +4655,7 @@ extension FSx { /// A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. public let copyTagsToVolumes: Bool? public let dailyAutomaticBackupStartTime: String? - /// Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2. + /// Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1. public let deploymentType: OpenZFSDeploymentType? public let diskIopsConfiguration: DiskIopsConfiguration? /// The IP address of the endpoint that is used to access data or to manage the file system. @@ -5761,20 +5766,23 @@ extension FSx { public let diskIopsConfiguration: DiskIopsConfiguration? /// Update the password for the fsxadmin user by entering a new password. You use the fsxadmin user to access the NetApp ONTAP CLI and REST API to manage your file system resources. For more information, see Managing resources using NetApp Applicaton. public let fsxAdminPassword: String? + /// Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols. + public let haPairs: Int? /// (Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system. public let removeRouteTableIds: [String]? /// Enter a new value to change the amount of throughput capacity for the file system in megabytes per second (MBps). For more information, see Managing throughput capacity in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value. The value of ThroughputCapacity when divided by the value of HAPairs is outside of the valid range for ThroughputCapacity. public let throughputCapacity: Int? - /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. This field and ThroughputCapacity cannot be defined in the same API call, but one is required. This field and ThroughputCapacity are the same for file systems with one HA pair. For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2, valid values are 3072 or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. + /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. This field and ThroughputCapacity cannot be defined in the same API call, but one is required. This field and ThroughputCapacity are the same for file systems with one HA pair. For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps. For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps. 
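The 400-level error conditions for this field continue just below; first, a minimal sketch of how the new per-HA-pair fields combine when scaling an ONTAP file system. It assumes the surrounding shape is FSx.UpdateFileSystemOntapConfiguration exposed by the SotoFSx module (the struct name is not visible in this hunk), and the chosen values are illustrative only.

import SotoFSx

// Illustrative values for a second-generation single-AZ file system:
// 4 HA pairs at 3072 MBps each, i.e. roughly 4 x 3072 = 12288 MBps of total throughput.
// Only one of throughputCapacity / throughputCapacityPerHAPair may be supplied per call,
// so the total is left implicit here.
let ontapUpdate = FSx.UpdateFileSystemOntapConfiguration(
    haPairs: 4,
    throughputCapacityPerHAPair: 3072
)

The haPairs value must stay within the 1...12 range enforced by the validation added below in this hunk.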
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair. The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12). The value of ThroughputCapacityPerHAPair is not a valid value. public let throughputCapacityPerHAPair: Int? public let weeklyMaintenanceStartTime: String? - public init(addRouteTableIds: [String]? = nil, automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, fsxAdminPassword: String? = nil, removeRouteTableIds: [String]? = nil, throughputCapacity: Int? = nil, throughputCapacityPerHAPair: Int? = nil, weeklyMaintenanceStartTime: String? = nil) { + public init(addRouteTableIds: [String]? = nil, automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, fsxAdminPassword: String? = nil, haPairs: Int? = nil, removeRouteTableIds: [String]? = nil, throughputCapacity: Int? = nil, throughputCapacityPerHAPair: Int? = nil, weeklyMaintenanceStartTime: String? = nil) { self.addRouteTableIds = addRouteTableIds self.automaticBackupRetentionDays = automaticBackupRetentionDays self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime self.diskIopsConfiguration = diskIopsConfiguration self.fsxAdminPassword = fsxAdminPassword + self.haPairs = haPairs self.removeRouteTableIds = removeRouteTableIds self.throughputCapacity = throughputCapacity self.throughputCapacityPerHAPair = throughputCapacityPerHAPair @@ -5797,6 +5805,8 @@ extension FSx { try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, max: 50) try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, min: 8) try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{8,50}$") + try self.validate(self.haPairs, name: "haPairs", parent: name, max: 12) + try self.validate(self.haPairs, name: "haPairs", parent: name, min: 1) try self.removeRouteTableIds?.forEach { try validate($0, name: "removeRouteTableIds[]", parent: name, max: 21) try validate($0, name: "removeRouteTableIds[]", parent: name, min: 12) @@ -5818,6 +5828,7 @@ extension FSx { case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime" case diskIopsConfiguration = "DiskIopsConfiguration" case fsxAdminPassword = "FsxAdminPassword" + case haPairs = "HAPairs" case removeRouteTableIds = "RemoveRouteTableIds" case throughputCapacity = "ThroughputCapacity" case throughputCapacityPerHAPair = "ThroughputCapacityPerHAPair" diff --git a/Sources/Soto/Services/Firehose/Firehose_shapes.swift b/Sources/Soto/Services/Firehose/Firehose_shapes.swift index 0e8951bf27..604f6d64c2 100644 --- a/Sources/Soto/Services/Firehose/Firehose_shapes.swift +++ b/Sources/Soto/Services/Firehose/Firehose_shapes.swift @@ -146,6 +146,12 @@ extension Firehose { public var description: String { return self.rawValue } } + public enum IcebergS3BackupMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allData = "AllData" + case failedDataOnly = "FailedDataOnly" + public var description: String { return self.rawValue } + } + public enum KeyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case awsOwnedCmk = 
"AWS_OWNED_CMK" case customerManagedCmk = "CUSTOMER_MANAGED_CMK" @@ -756,6 +762,25 @@ extension Firehose { } } + public struct CatalogConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies the Glue catalog ARN indentifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog. Amazon Data Firehose is in preview release and is subject to change. + public let catalogARN: String? + + public init(catalogARN: String? = nil) { + self.catalogARN = catalogARN + } + + public func validate(name: String) throws { + try self.validate(self.catalogARN, name: "catalogARN", parent: name, max: 512) + try self.validate(self.catalogARN, name: "catalogARN", parent: name, min: 1) + try self.validate(self.catalogARN, name: "catalogARN", parent: name, pattern: "^arn:") + } + + private enum CodingKeys: String, CodingKey { + case catalogARN = "CatalogARN" + } + } + public struct CloudWatchLoggingOptions: AWSEncodableShape & AWSDecodableShape { /// Enables or disables CloudWatch logging. public let enabled: Bool? @@ -832,6 +857,8 @@ extension Firehose { public let extendedS3DestinationConfiguration: ExtendedS3DestinationConfiguration? /// Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination. public let httpEndpointDestinationConfiguration: HttpEndpointDestinationConfiguration? + /// Configure Apache Iceberg Tables destination. Amazon Data Firehose is in preview release and is subject to change. + public let icebergDestinationConfiguration: IcebergDestinationConfiguration? /// When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream. public let kinesisStreamSourceConfiguration: KinesisStreamSourceConfiguration? public let mskSourceConfiguration: MSKSourceConfiguration? @@ -846,7 +873,7 @@ extension Firehose { /// A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream. If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an AccessDeniedException such as following. AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy. For an example IAM policy, see Tag example. public let tags: [Tag]? - public init(amazonOpenSearchServerlessDestinationConfiguration: AmazonOpenSearchServerlessDestinationConfiguration? = nil, amazonopensearchserviceDestinationConfiguration: AmazonopensearchserviceDestinationConfiguration? = nil, deliveryStreamEncryptionConfigurationInput: DeliveryStreamEncryptionConfigurationInput? 
= nil, deliveryStreamName: String, deliveryStreamType: DeliveryStreamType? = nil, elasticsearchDestinationConfiguration: ElasticsearchDestinationConfiguration? = nil, extendedS3DestinationConfiguration: ExtendedS3DestinationConfiguration? = nil, httpEndpointDestinationConfiguration: HttpEndpointDestinationConfiguration? = nil, kinesisStreamSourceConfiguration: KinesisStreamSourceConfiguration? = nil, mskSourceConfiguration: MSKSourceConfiguration? = nil, redshiftDestinationConfiguration: RedshiftDestinationConfiguration? = nil, snowflakeDestinationConfiguration: SnowflakeDestinationConfiguration? = nil, splunkDestinationConfiguration: SplunkDestinationConfiguration? = nil, tags: [Tag]? = nil) { + public init(amazonOpenSearchServerlessDestinationConfiguration: AmazonOpenSearchServerlessDestinationConfiguration? = nil, amazonopensearchserviceDestinationConfiguration: AmazonopensearchserviceDestinationConfiguration? = nil, deliveryStreamEncryptionConfigurationInput: DeliveryStreamEncryptionConfigurationInput? = nil, deliveryStreamName: String, deliveryStreamType: DeliveryStreamType? = nil, elasticsearchDestinationConfiguration: ElasticsearchDestinationConfiguration? = nil, extendedS3DestinationConfiguration: ExtendedS3DestinationConfiguration? = nil, httpEndpointDestinationConfiguration: HttpEndpointDestinationConfiguration? = nil, icebergDestinationConfiguration: IcebergDestinationConfiguration? = nil, kinesisStreamSourceConfiguration: KinesisStreamSourceConfiguration? = nil, mskSourceConfiguration: MSKSourceConfiguration? = nil, redshiftDestinationConfiguration: RedshiftDestinationConfiguration? = nil, snowflakeDestinationConfiguration: SnowflakeDestinationConfiguration? = nil, splunkDestinationConfiguration: SplunkDestinationConfiguration? = nil, tags: [Tag]? = nil) { self.amazonOpenSearchServerlessDestinationConfiguration = amazonOpenSearchServerlessDestinationConfiguration self.amazonopensearchserviceDestinationConfiguration = amazonopensearchserviceDestinationConfiguration self.deliveryStreamEncryptionConfigurationInput = deliveryStreamEncryptionConfigurationInput @@ -855,6 +882,7 @@ extension Firehose { self.elasticsearchDestinationConfiguration = elasticsearchDestinationConfiguration self.extendedS3DestinationConfiguration = extendedS3DestinationConfiguration self.httpEndpointDestinationConfiguration = httpEndpointDestinationConfiguration + self.icebergDestinationConfiguration = icebergDestinationConfiguration self.kinesisStreamSourceConfiguration = kinesisStreamSourceConfiguration self.mskSourceConfiguration = mskSourceConfiguration self.redshiftDestinationConfiguration = redshiftDestinationConfiguration @@ -865,7 +893,7 @@ extension Firehose { } @available(*, deprecated, message: "Members s3DestinationConfiguration have been deprecated") - public init(amazonOpenSearchServerlessDestinationConfiguration: AmazonOpenSearchServerlessDestinationConfiguration? = nil, amazonopensearchserviceDestinationConfiguration: AmazonopensearchserviceDestinationConfiguration? = nil, deliveryStreamEncryptionConfigurationInput: DeliveryStreamEncryptionConfigurationInput? = nil, deliveryStreamName: String, deliveryStreamType: DeliveryStreamType? = nil, elasticsearchDestinationConfiguration: ElasticsearchDestinationConfiguration? = nil, extendedS3DestinationConfiguration: ExtendedS3DestinationConfiguration? = nil, httpEndpointDestinationConfiguration: HttpEndpointDestinationConfiguration? = nil, kinesisStreamSourceConfiguration: KinesisStreamSourceConfiguration? 
= nil, mskSourceConfiguration: MSKSourceConfiguration? = nil, redshiftDestinationConfiguration: RedshiftDestinationConfiguration? = nil, s3DestinationConfiguration: S3DestinationConfiguration? = nil, snowflakeDestinationConfiguration: SnowflakeDestinationConfiguration? = nil, splunkDestinationConfiguration: SplunkDestinationConfiguration? = nil, tags: [Tag]? = nil) { + public init(amazonOpenSearchServerlessDestinationConfiguration: AmazonOpenSearchServerlessDestinationConfiguration? = nil, amazonopensearchserviceDestinationConfiguration: AmazonopensearchserviceDestinationConfiguration? = nil, deliveryStreamEncryptionConfigurationInput: DeliveryStreamEncryptionConfigurationInput? = nil, deliveryStreamName: String, deliveryStreamType: DeliveryStreamType? = nil, elasticsearchDestinationConfiguration: ElasticsearchDestinationConfiguration? = nil, extendedS3DestinationConfiguration: ExtendedS3DestinationConfiguration? = nil, httpEndpointDestinationConfiguration: HttpEndpointDestinationConfiguration? = nil, icebergDestinationConfiguration: IcebergDestinationConfiguration? = nil, kinesisStreamSourceConfiguration: KinesisStreamSourceConfiguration? = nil, mskSourceConfiguration: MSKSourceConfiguration? = nil, redshiftDestinationConfiguration: RedshiftDestinationConfiguration? = nil, s3DestinationConfiguration: S3DestinationConfiguration? = nil, snowflakeDestinationConfiguration: SnowflakeDestinationConfiguration? = nil, splunkDestinationConfiguration: SplunkDestinationConfiguration? = nil, tags: [Tag]? = nil) { self.amazonOpenSearchServerlessDestinationConfiguration = amazonOpenSearchServerlessDestinationConfiguration self.amazonopensearchserviceDestinationConfiguration = amazonopensearchserviceDestinationConfiguration self.deliveryStreamEncryptionConfigurationInput = deliveryStreamEncryptionConfigurationInput @@ -874,6 +902,7 @@ extension Firehose { self.elasticsearchDestinationConfiguration = elasticsearchDestinationConfiguration self.extendedS3DestinationConfiguration = extendedS3DestinationConfiguration self.httpEndpointDestinationConfiguration = httpEndpointDestinationConfiguration + self.icebergDestinationConfiguration = icebergDestinationConfiguration self.kinesisStreamSourceConfiguration = kinesisStreamSourceConfiguration self.mskSourceConfiguration = mskSourceConfiguration self.redshiftDestinationConfiguration = redshiftDestinationConfiguration @@ -893,6 +922,7 @@ extension Firehose { try self.elasticsearchDestinationConfiguration?.validate(name: "\(name).elasticsearchDestinationConfiguration") try self.extendedS3DestinationConfiguration?.validate(name: "\(name).extendedS3DestinationConfiguration") try self.httpEndpointDestinationConfiguration?.validate(name: "\(name).httpEndpointDestinationConfiguration") + try self.icebergDestinationConfiguration?.validate(name: "\(name).icebergDestinationConfiguration") try self.kinesisStreamSourceConfiguration?.validate(name: "\(name).kinesisStreamSourceConfiguration") try self.mskSourceConfiguration?.validate(name: "\(name).mskSourceConfiguration") try self.redshiftDestinationConfiguration?.validate(name: "\(name).redshiftDestinationConfiguration") @@ -915,6 +945,7 @@ extension Firehose { case elasticsearchDestinationConfiguration = "ElasticsearchDestinationConfiguration" case extendedS3DestinationConfiguration = "ExtendedS3DestinationConfiguration" case httpEndpointDestinationConfiguration = "HttpEndpointDestinationConfiguration" + case icebergDestinationConfiguration = "IcebergDestinationConfiguration" case kinesisStreamSourceConfiguration = 
"KinesisStreamSourceConfiguration" case mskSourceConfiguration = "MSKSourceConfiguration" case redshiftDestinationConfiguration = "RedshiftDestinationConfiguration" @@ -1181,6 +1212,8 @@ extension Firehose { public let extendedS3DestinationDescription: ExtendedS3DestinationDescription? /// Describes the specified HTTP endpoint destination. public let httpEndpointDestinationDescription: HttpEndpointDestinationDescription? + /// Describes a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change. + public let icebergDestinationDescription: IcebergDestinationDescription? /// The destination in Amazon Redshift. public let redshiftDestinationDescription: RedshiftDestinationDescription? /// [Deprecated] The destination in Amazon S3. @@ -1190,13 +1223,14 @@ extension Firehose { /// The destination in Splunk. public let splunkDestinationDescription: SplunkDestinationDescription? - public init(amazonOpenSearchServerlessDestinationDescription: AmazonOpenSearchServerlessDestinationDescription? = nil, amazonopensearchserviceDestinationDescription: AmazonopensearchserviceDestinationDescription? = nil, destinationId: String, elasticsearchDestinationDescription: ElasticsearchDestinationDescription? = nil, extendedS3DestinationDescription: ExtendedS3DestinationDescription? = nil, httpEndpointDestinationDescription: HttpEndpointDestinationDescription? = nil, redshiftDestinationDescription: RedshiftDestinationDescription? = nil, s3DestinationDescription: S3DestinationDescription? = nil, snowflakeDestinationDescription: SnowflakeDestinationDescription? = nil, splunkDestinationDescription: SplunkDestinationDescription? = nil) { + public init(amazonOpenSearchServerlessDestinationDescription: AmazonOpenSearchServerlessDestinationDescription? = nil, amazonopensearchserviceDestinationDescription: AmazonopensearchserviceDestinationDescription? = nil, destinationId: String, elasticsearchDestinationDescription: ElasticsearchDestinationDescription? = nil, extendedS3DestinationDescription: ExtendedS3DestinationDescription? = nil, httpEndpointDestinationDescription: HttpEndpointDestinationDescription? = nil, icebergDestinationDescription: IcebergDestinationDescription? = nil, redshiftDestinationDescription: RedshiftDestinationDescription? = nil, s3DestinationDescription: S3DestinationDescription? = nil, snowflakeDestinationDescription: SnowflakeDestinationDescription? = nil, splunkDestinationDescription: SplunkDestinationDescription? 
= nil) { self.amazonOpenSearchServerlessDestinationDescription = amazonOpenSearchServerlessDestinationDescription self.amazonopensearchserviceDestinationDescription = amazonopensearchserviceDestinationDescription self.destinationId = destinationId self.elasticsearchDestinationDescription = elasticsearchDestinationDescription self.extendedS3DestinationDescription = extendedS3DestinationDescription self.httpEndpointDestinationDescription = httpEndpointDestinationDescription + self.icebergDestinationDescription = icebergDestinationDescription self.redshiftDestinationDescription = redshiftDestinationDescription self.s3DestinationDescription = s3DestinationDescription self.snowflakeDestinationDescription = snowflakeDestinationDescription @@ -1210,6 +1244,7 @@ extension Firehose { case elasticsearchDestinationDescription = "ElasticsearchDestinationDescription" case extendedS3DestinationDescription = "ExtendedS3DestinationDescription" case httpEndpointDestinationDescription = "HttpEndpointDestinationDescription" + case icebergDestinationDescription = "IcebergDestinationDescription" case redshiftDestinationDescription = "RedshiftDestinationDescription" case s3DestinationDescription = "S3DestinationDescription" case snowflakeDestinationDescription = "SnowflakeDestinationDescription" @@ -1217,6 +1252,47 @@ extension Firehose { } } + public struct DestinationTableConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The name of the Apache Iceberg database. Amazon Data Firehose is in preview release and is subject to change. + public let destinationDatabaseName: String + /// Specifies the name of the Apache Iceberg Table. Amazon Data Firehose is in preview release and is subject to change. + public let destinationTableName: String + /// The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination. Amazon Data Firehose is in preview release and is subject to change. + public let s3ErrorOutputPrefix: String? + /// A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table. Amazon Data Firehose is in preview release and is subject to change. + public let uniqueKeys: [String]? + + public init(destinationDatabaseName: String, destinationTableName: String, s3ErrorOutputPrefix: String? = nil, uniqueKeys: [String]? 
= nil) { + self.destinationDatabaseName = destinationDatabaseName + self.destinationTableName = destinationTableName + self.s3ErrorOutputPrefix = s3ErrorOutputPrefix + self.uniqueKeys = uniqueKeys + } + + public func validate(name: String) throws { + try self.validate(self.destinationDatabaseName, name: "destinationDatabaseName", parent: name, max: 1024) + try self.validate(self.destinationDatabaseName, name: "destinationDatabaseName", parent: name, min: 1) + try self.validate(self.destinationDatabaseName, name: "destinationDatabaseName", parent: name, pattern: "^\\S+$") + try self.validate(self.destinationTableName, name: "destinationTableName", parent: name, max: 1024) + try self.validate(self.destinationTableName, name: "destinationTableName", parent: name, min: 1) + try self.validate(self.destinationTableName, name: "destinationTableName", parent: name, pattern: "^\\S+$") + try self.validate(self.s3ErrorOutputPrefix, name: "s3ErrorOutputPrefix", parent: name, max: 1024) + try self.validate(self.s3ErrorOutputPrefix, name: "s3ErrorOutputPrefix", parent: name, pattern: ".*") + try self.uniqueKeys?.forEach { + try validate($0, name: "uniqueKeys[]", parent: name, max: 1024) + try validate($0, name: "uniqueKeys[]", parent: name, min: 1) + try validate($0, name: "uniqueKeys[]", parent: name, pattern: "^\\S+$") + } + } + + private enum CodingKeys: String, CodingKey { + case destinationDatabaseName = "DestinationDatabaseName" + case destinationTableName = "DestinationTableName" + case s3ErrorOutputPrefix = "S3ErrorOutputPrefix" + case uniqueKeys = "UniqueKeys" + } + } + public struct DocumentIdOptions: AWSEncodableShape & AWSDecodableShape { /// When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs. When the NO_DOCUMENT_ID option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance. public let defaultDocumentIdFormat: DefaultDocumentIdFormat @@ -2143,6 +2219,156 @@ extension Firehose { } } + public struct IcebergDestinationConfiguration: AWSEncodableShape { + public let bufferingHints: BufferingHints? + /// Configuration describing where the destination Apache Iceberg Tables are persisted. Amazon Data Firehose is in preview release and is subject to change. + public let catalogConfiguration: CatalogConfiguration + public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? + /// Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change. + public let destinationTableConfigurationList: [DestinationTableConfiguration]? + public let processingConfiguration: ProcessingConfiguration? + public let retryOptions: RetryOptions? + /// The Amazon Resource Name (ARN) of the Apache Iceberg tables role. Amazon Data Firehose is in preview release and is subject to change. 
+ public let roleARN: String + /// Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change. + public let s3BackupMode: IcebergS3BackupMode? + public let s3Configuration: S3DestinationConfiguration + + public init(bufferingHints: BufferingHints? = nil, catalogConfiguration: CatalogConfiguration, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, destinationTableConfigurationList: [DestinationTableConfiguration]? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RetryOptions? = nil, roleARN: String, s3BackupMode: IcebergS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration) { + self.bufferingHints = bufferingHints + self.catalogConfiguration = catalogConfiguration + self.cloudWatchLoggingOptions = cloudWatchLoggingOptions + self.destinationTableConfigurationList = destinationTableConfigurationList + self.processingConfiguration = processingConfiguration + self.retryOptions = retryOptions + self.roleARN = roleARN + self.s3BackupMode = s3BackupMode + self.s3Configuration = s3Configuration + } + + public func validate(name: String) throws { + try self.bufferingHints?.validate(name: "\(name).bufferingHints") + try self.catalogConfiguration.validate(name: "\(name).catalogConfiguration") + try self.cloudWatchLoggingOptions?.validate(name: "\(name).cloudWatchLoggingOptions") + try self.destinationTableConfigurationList?.forEach { + try $0.validate(name: "\(name).destinationTableConfigurationList[]") + } + try self.processingConfiguration?.validate(name: "\(name).processingConfiguration") + try self.retryOptions?.validate(name: "\(name).retryOptions") + try self.validate(self.roleARN, name: "roleARN", parent: name, max: 512) + try self.validate(self.roleARN, name: "roleARN", parent: name, min: 1) + try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") + try self.s3Configuration.validate(name: "\(name).s3Configuration") + } + + private enum CodingKeys: String, CodingKey { + case bufferingHints = "BufferingHints" + case catalogConfiguration = "CatalogConfiguration" + case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" + case destinationTableConfigurationList = "DestinationTableConfigurationList" + case processingConfiguration = "ProcessingConfiguration" + case retryOptions = "RetryOptions" + case roleARN = "RoleARN" + case s3BackupMode = "S3BackupMode" + case s3Configuration = "S3Configuration" + } + } + + public struct IcebergDestinationDescription: AWSDecodableShape { + public let bufferingHints: BufferingHints? + /// Configuration describing where the destination Iceberg tables are persisted. Amazon Data Firehose is in preview release and is subject to change. + public let catalogConfiguration: CatalogConfiguration? + public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? + /// Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change. + public let destinationTableConfigurationList: [DestinationTableConfiguration]? + public let processingConfiguration: ProcessingConfiguration? + public let retryOptions: RetryOptions? + /// The Amazon Resource Name (ARN) of the Apache Iceberg Tables role. Amazon Data Firehose is in preview release and is subject to change. + public let roleARN: String? + /// Describes how Firehose will backup records. 
Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change. + public let s3BackupMode: IcebergS3BackupMode? + public let s3DestinationDescription: S3DestinationDescription? + + public init(bufferingHints: BufferingHints? = nil, catalogConfiguration: CatalogConfiguration? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, destinationTableConfigurationList: [DestinationTableConfiguration]? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RetryOptions? = nil, roleARN: String? = nil, s3BackupMode: IcebergS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil) { + self.bufferingHints = bufferingHints + self.catalogConfiguration = catalogConfiguration + self.cloudWatchLoggingOptions = cloudWatchLoggingOptions + self.destinationTableConfigurationList = destinationTableConfigurationList + self.processingConfiguration = processingConfiguration + self.retryOptions = retryOptions + self.roleARN = roleARN + self.s3BackupMode = s3BackupMode + self.s3DestinationDescription = s3DestinationDescription + } + + private enum CodingKeys: String, CodingKey { + case bufferingHints = "BufferingHints" + case catalogConfiguration = "CatalogConfiguration" + case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" + case destinationTableConfigurationList = "DestinationTableConfigurationList" + case processingConfiguration = "ProcessingConfiguration" + case retryOptions = "RetryOptions" + case roleARN = "RoleARN" + case s3BackupMode = "S3BackupMode" + case s3DestinationDescription = "S3DestinationDescription" + } + } + + public struct IcebergDestinationUpdate: AWSEncodableShape { + public let bufferingHints: BufferingHints? + /// Configuration describing where the destination Iceberg tables are persisted. Amazon Data Firehose is in preview release and is subject to change. + public let catalogConfiguration: CatalogConfiguration? + public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? + /// Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change. + public let destinationTableConfigurationList: [DestinationTableConfiguration]? + public let processingConfiguration: ProcessingConfiguration? + public let retryOptions: RetryOptions? + /// The Amazon Resource Name (ARN) of the Apache Iceberg Tables role. Amazon Data Firehose is in preview release and is subject to change. + public let roleARN: String? + /// Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change. + public let s3BackupMode: IcebergS3BackupMode? + public let s3Configuration: S3DestinationConfiguration? + + public init(bufferingHints: BufferingHints? = nil, catalogConfiguration: CatalogConfiguration? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, destinationTableConfigurationList: [DestinationTableConfiguration]? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RetryOptions? = nil, roleARN: String? = nil, s3BackupMode: IcebergS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration? 
= nil) { + self.bufferingHints = bufferingHints + self.catalogConfiguration = catalogConfiguration + self.cloudWatchLoggingOptions = cloudWatchLoggingOptions + self.destinationTableConfigurationList = destinationTableConfigurationList + self.processingConfiguration = processingConfiguration + self.retryOptions = retryOptions + self.roleARN = roleARN + self.s3BackupMode = s3BackupMode + self.s3Configuration = s3Configuration + } + + public func validate(name: String) throws { + try self.bufferingHints?.validate(name: "\(name).bufferingHints") + try self.catalogConfiguration?.validate(name: "\(name).catalogConfiguration") + try self.cloudWatchLoggingOptions?.validate(name: "\(name).cloudWatchLoggingOptions") + try self.destinationTableConfigurationList?.forEach { + try $0.validate(name: "\(name).destinationTableConfigurationList[]") + } + try self.processingConfiguration?.validate(name: "\(name).processingConfiguration") + try self.retryOptions?.validate(name: "\(name).retryOptions") + try self.validate(self.roleARN, name: "roleARN", parent: name, max: 512) + try self.validate(self.roleARN, name: "roleARN", parent: name, min: 1) + try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") + try self.s3Configuration?.validate(name: "\(name).s3Configuration") + } + + private enum CodingKeys: String, CodingKey { + case bufferingHints = "BufferingHints" + case catalogConfiguration = "CatalogConfiguration" + case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" + case destinationTableConfigurationList = "DestinationTableConfigurationList" + case processingConfiguration = "ProcessingConfiguration" + case retryOptions = "RetryOptions" + case roleARN = "RoleARN" + case s3BackupMode = "S3BackupMode" + case s3Configuration = "S3Configuration" + } + } + public struct InputFormatConfiguration: AWSEncodableShape & AWSDecodableShape { /// Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request. public let deserializer: Deserializer? @@ -2326,12 +2552,15 @@ extension Firehose { public let authenticationConfiguration: AuthenticationConfiguration /// The ARN of the Amazon MSK cluster. public let mskClusterARN: String + /// The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to timestamp when Firehose becomes Active. If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestamp parameter to Epoch (1970-01-01T00:00:00Z). + public let readFromTimestamp: Date? /// The topic name within the Amazon MSK cluster. public let topicName: String - public init(authenticationConfiguration: AuthenticationConfiguration, mskClusterARN: String, topicName: String) { + public init(authenticationConfiguration: AuthenticationConfiguration, mskClusterARN: String, readFromTimestamp: Date? = nil, topicName: String) { self.authenticationConfiguration = authenticationConfiguration self.mskClusterARN = mskClusterARN + self.readFromTimestamp = readFromTimestamp self.topicName = topicName } @@ -2348,6 +2577,7 @@ extension Firehose { private enum CodingKeys: String, CodingKey { case authenticationConfiguration = "AuthenticationConfiguration" case mskClusterARN = "MSKClusterARN" + case readFromTimestamp = "ReadFromTimestamp" case topicName = "TopicName" } } @@ -2359,13 +2589,16 @@ extension Firehose { public let deliveryStartTimestamp: Date? 
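With the Apache Iceberg shapes above in place, here is a minimal sketch of assembling an IcebergDestinationUpdate from them. The ARNs, database, and table names are placeholders, and .failedDataOnly mirrors the only backup mode the doc comments describe as supported during the preview; this illustrates the new types only, not a recommended configuration.

import SotoFirehose

// Placeholder identifiers; substitute a real Glue catalog, database, table, and IAM role.
let catalog = Firehose.CatalogConfiguration(
    catalogARN: "arn:aws:glue:us-east-1:111122223333:catalog"
)

let table = Firehose.DestinationTableConfiguration(
    destinationDatabaseName: "analytics_db",
    destinationTableName: "events",
    s3ErrorOutputPrefix: "iceberg-errors/",
    uniqueKeys: ["event_id"]
)

// Every member of IcebergDestinationUpdate is optional, so only the fields of interest are set.
let icebergUpdate = Firehose.IcebergDestinationUpdate(
    catalogConfiguration: catalog,
    destinationTableConfigurationList: [table],
    roleARN: "arn:aws:iam::111122223333:role/firehose-iceberg-delivery",
    s3BackupMode: .failedDataOnly
)

Such a value would then travel through the icebergDestinationUpdate member that this diff adds to the update-destination request further down.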
/// The ARN of the Amazon MSK cluster. public let mskClusterARN: String? + /// The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to timestamp when Firehose becomes Active. If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestampUTC parameter to Epoch (1970-01-01T00:00:00Z). + public let readFromTimestamp: Date? /// The topic name within the Amazon MSK cluster. public let topicName: String? - public init(authenticationConfiguration: AuthenticationConfiguration? = nil, deliveryStartTimestamp: Date? = nil, mskClusterARN: String? = nil, topicName: String? = nil) { + public init(authenticationConfiguration: AuthenticationConfiguration? = nil, deliveryStartTimestamp: Date? = nil, mskClusterARN: String? = nil, readFromTimestamp: Date? = nil, topicName: String? = nil) { self.authenticationConfiguration = authenticationConfiguration self.deliveryStartTimestamp = deliveryStartTimestamp self.mskClusterARN = mskClusterARN + self.readFromTimestamp = readFromTimestamp self.topicName = topicName } @@ -2373,6 +2606,7 @@ extension Firehose { case authenticationConfiguration = "AuthenticationConfiguration" case deliveryStartTimestamp = "DeliveryStartTimestamp" case mskClusterARN = "MSKClusterARN" + case readFromTimestamp = "ReadFromTimestamp" case topicName = "TopicName" } } @@ -3236,9 +3470,35 @@ extension Firehose { } } + public struct SnowflakeBufferingHints: AWSEncodableShape & AWSDecodableShape { + /// Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0. + public let intervalInSeconds: Int? + /// Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1. + public let sizeInMBs: Int? + + public init(intervalInSeconds: Int? = nil, sizeInMBs: Int? = nil) { + self.intervalInSeconds = intervalInSeconds + self.sizeInMBs = sizeInMBs + } + + public func validate(name: String) throws { + try self.validate(self.intervalInSeconds, name: "intervalInSeconds", parent: name, max: 900) + try self.validate(self.intervalInSeconds, name: "intervalInSeconds", parent: name, min: 0) + try self.validate(self.sizeInMBs, name: "sizeInMBs", parent: name, max: 128) + try self.validate(self.sizeInMBs, name: "sizeInMBs", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case intervalInSeconds = "IntervalInSeconds" + case sizeInMBs = "SizeInMBs" + } + } + public struct SnowflakeDestinationConfiguration: AWSEncodableShape { /// URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional. public let accountUrl: String + /// Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values. + public let bufferingHints: SnowflakeBufferingHints? public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? /// The name of the record content column public let contentColumnName: String? @@ -3273,8 +3533,9 @@ extension Firehose { /// User login name for the Snowflake account. public let user: String? - public init(accountUrl: String, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? 
= nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, schema: String, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String, user: String? = nil) { + public init(accountUrl: String, bufferingHints: SnowflakeBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, schema: String, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String, user: String? = nil) { self.accountUrl = accountUrl + self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName self.database = database @@ -3299,6 +3560,7 @@ extension Firehose { try self.validate(self.accountUrl, name: "accountUrl", parent: name, max: 2048) try self.validate(self.accountUrl, name: "accountUrl", parent: name, min: 24) try self.validate(self.accountUrl, name: "accountUrl", parent: name, pattern: "^.+?\\.snowflakecomputing\\.com$") + try self.bufferingHints?.validate(name: "\(name).bufferingHints") try self.cloudWatchLoggingOptions?.validate(name: "\(name).cloudWatchLoggingOptions") try self.validate(self.contentColumnName, name: "contentColumnName", parent: name, max: 255) try self.validate(self.contentColumnName, name: "contentColumnName", parent: name, min: 1) @@ -3330,6 +3592,7 @@ extension Firehose { private enum CodingKeys: String, CodingKey { case accountUrl = "AccountUrl" + case bufferingHints = "BufferingHints" case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" case contentColumnName = "ContentColumnName" case database = "Database" @@ -3354,6 +3617,8 @@ extension Firehose { public struct SnowflakeDestinationDescription: AWSDecodableShape { /// URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional. public let accountUrl: String? + /// Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values. + public let bufferingHints: SnowflakeBufferingHints? public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? /// The name of the record content column public let contentColumnName: String? @@ -3384,8 +3649,9 @@ extension Firehose { /// User login name for the Snowflake account. public let user: String? - public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, metaDataColumnName: String? = nil, processingConfiguration: ProcessingConfiguration? 
= nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String? = nil, user: String? = nil) { + public init(accountUrl: String? = nil, bufferingHints: SnowflakeBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, metaDataColumnName: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String? = nil, user: String? = nil) { self.accountUrl = accountUrl + self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName self.database = database @@ -3406,6 +3672,7 @@ extension Firehose { private enum CodingKeys: String, CodingKey { case accountUrl = "AccountUrl" + case bufferingHints = "BufferingHints" case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" case contentColumnName = "ContentColumnName" case database = "Database" @@ -3428,6 +3695,8 @@ extension Firehose { public struct SnowflakeDestinationUpdate: AWSEncodableShape { /// URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional. public let accountUrl: String? + /// Describes the buffering to perform before delivering data to the Snowflake destination. + public let bufferingHints: SnowflakeBufferingHints? public let cloudWatchLoggingOptions: CloudWatchLoggingOptions? /// The name of the content metadata column public let contentColumnName: String? @@ -3460,8 +3729,9 @@ extension Firehose { /// User login name for the Snowflake account. public let user: String? - public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, table: String? = nil, user: String? = nil) { + public init(accountUrl: String? = nil, bufferingHints: SnowflakeBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? 
= nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, table: String? = nil, user: String? = nil) { self.accountUrl = accountUrl + self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName self.database = database @@ -3485,6 +3755,7 @@ extension Firehose { try self.validate(self.accountUrl, name: "accountUrl", parent: name, max: 2048) try self.validate(self.accountUrl, name: "accountUrl", parent: name, min: 24) try self.validate(self.accountUrl, name: "accountUrl", parent: name, pattern: "^.+?\\.snowflakecomputing\\.com$") + try self.bufferingHints?.validate(name: "\(name).bufferingHints") try self.cloudWatchLoggingOptions?.validate(name: "\(name).cloudWatchLoggingOptions") try self.validate(self.contentColumnName, name: "contentColumnName", parent: name, max: 255) try self.validate(self.contentColumnName, name: "contentColumnName", parent: name, min: 1) @@ -3515,6 +3786,7 @@ extension Firehose { private enum CodingKeys: String, CodingKey { case accountUrl = "AccountUrl" + case bufferingHints = "BufferingHints" case cloudWatchLoggingOptions = "CloudWatchLoggingOptions" case contentColumnName = "ContentColumnName" case database = "Database" @@ -4001,6 +4273,8 @@ extension Firehose { public let extendedS3DestinationUpdate: ExtendedS3DestinationUpdate? /// Describes an update to the specified HTTP endpoint destination. public let httpEndpointDestinationUpdate: HttpEndpointDestinationUpdate? + /// Describes an update for a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change. + public let icebergDestinationUpdate: IcebergDestinationUpdate? /// Describes an update for a destination in Amazon Redshift. public let redshiftDestinationUpdate: RedshiftDestinationUpdate? /// [Deprecated] Describes an update for a destination in Amazon S3. @@ -4010,7 +4284,7 @@ extension Firehose { /// Describes an update for a destination in Splunk. public let splunkDestinationUpdate: SplunkDestinationUpdate? - public init(amazonOpenSearchServerlessDestinationUpdate: AmazonOpenSearchServerlessDestinationUpdate? = nil, amazonopensearchserviceDestinationUpdate: AmazonopensearchserviceDestinationUpdate? = nil, currentDeliveryStreamVersionId: String, deliveryStreamName: String, destinationId: String, elasticsearchDestinationUpdate: ElasticsearchDestinationUpdate? = nil, extendedS3DestinationUpdate: ExtendedS3DestinationUpdate? = nil, httpEndpointDestinationUpdate: HttpEndpointDestinationUpdate? = nil, redshiftDestinationUpdate: RedshiftDestinationUpdate? = nil, snowflakeDestinationUpdate: SnowflakeDestinationUpdate? = nil, splunkDestinationUpdate: SplunkDestinationUpdate? = nil) { + public init(amazonOpenSearchServerlessDestinationUpdate: AmazonOpenSearchServerlessDestinationUpdate? = nil, amazonopensearchserviceDestinationUpdate: AmazonopensearchserviceDestinationUpdate? = nil, currentDeliveryStreamVersionId: String, deliveryStreamName: String, destinationId: String, elasticsearchDestinationUpdate: ElasticsearchDestinationUpdate? = nil, extendedS3DestinationUpdate: ExtendedS3DestinationUpdate? = nil, httpEndpointDestinationUpdate: HttpEndpointDestinationUpdate? = nil, icebergDestinationUpdate: IcebergDestinationUpdate? 
= nil, redshiftDestinationUpdate: RedshiftDestinationUpdate? = nil, snowflakeDestinationUpdate: SnowflakeDestinationUpdate? = nil, splunkDestinationUpdate: SplunkDestinationUpdate? = nil) { self.amazonOpenSearchServerlessDestinationUpdate = amazonOpenSearchServerlessDestinationUpdate self.amazonopensearchserviceDestinationUpdate = amazonopensearchserviceDestinationUpdate self.currentDeliveryStreamVersionId = currentDeliveryStreamVersionId @@ -4019,6 +4293,7 @@ extension Firehose { self.elasticsearchDestinationUpdate = elasticsearchDestinationUpdate self.extendedS3DestinationUpdate = extendedS3DestinationUpdate self.httpEndpointDestinationUpdate = httpEndpointDestinationUpdate + self.icebergDestinationUpdate = icebergDestinationUpdate self.redshiftDestinationUpdate = redshiftDestinationUpdate self.s3DestinationUpdate = nil self.snowflakeDestinationUpdate = snowflakeDestinationUpdate @@ -4026,7 +4301,7 @@ extension Firehose { } @available(*, deprecated, message: "Members s3DestinationUpdate have been deprecated") - public init(amazonOpenSearchServerlessDestinationUpdate: AmazonOpenSearchServerlessDestinationUpdate? = nil, amazonopensearchserviceDestinationUpdate: AmazonopensearchserviceDestinationUpdate? = nil, currentDeliveryStreamVersionId: String, deliveryStreamName: String, destinationId: String, elasticsearchDestinationUpdate: ElasticsearchDestinationUpdate? = nil, extendedS3DestinationUpdate: ExtendedS3DestinationUpdate? = nil, httpEndpointDestinationUpdate: HttpEndpointDestinationUpdate? = nil, redshiftDestinationUpdate: RedshiftDestinationUpdate? = nil, s3DestinationUpdate: S3DestinationUpdate? = nil, snowflakeDestinationUpdate: SnowflakeDestinationUpdate? = nil, splunkDestinationUpdate: SplunkDestinationUpdate? = nil) { + public init(amazonOpenSearchServerlessDestinationUpdate: AmazonOpenSearchServerlessDestinationUpdate? = nil, amazonopensearchserviceDestinationUpdate: AmazonopensearchserviceDestinationUpdate? = nil, currentDeliveryStreamVersionId: String, deliveryStreamName: String, destinationId: String, elasticsearchDestinationUpdate: ElasticsearchDestinationUpdate? = nil, extendedS3DestinationUpdate: ExtendedS3DestinationUpdate? = nil, httpEndpointDestinationUpdate: HttpEndpointDestinationUpdate? = nil, icebergDestinationUpdate: IcebergDestinationUpdate? = nil, redshiftDestinationUpdate: RedshiftDestinationUpdate? = nil, s3DestinationUpdate: S3DestinationUpdate? = nil, snowflakeDestinationUpdate: SnowflakeDestinationUpdate? = nil, splunkDestinationUpdate: SplunkDestinationUpdate? 
= nil) { self.amazonOpenSearchServerlessDestinationUpdate = amazonOpenSearchServerlessDestinationUpdate self.amazonopensearchserviceDestinationUpdate = amazonopensearchserviceDestinationUpdate self.currentDeliveryStreamVersionId = currentDeliveryStreamVersionId @@ -4035,6 +4310,7 @@ extension Firehose { self.elasticsearchDestinationUpdate = elasticsearchDestinationUpdate self.extendedS3DestinationUpdate = extendedS3DestinationUpdate self.httpEndpointDestinationUpdate = httpEndpointDestinationUpdate + self.icebergDestinationUpdate = icebergDestinationUpdate self.redshiftDestinationUpdate = redshiftDestinationUpdate self.s3DestinationUpdate = s3DestinationUpdate self.snowflakeDestinationUpdate = snowflakeDestinationUpdate @@ -4056,6 +4332,7 @@ extension Firehose { try self.elasticsearchDestinationUpdate?.validate(name: "\(name).elasticsearchDestinationUpdate") try self.extendedS3DestinationUpdate?.validate(name: "\(name).extendedS3DestinationUpdate") try self.httpEndpointDestinationUpdate?.validate(name: "\(name).httpEndpointDestinationUpdate") + try self.icebergDestinationUpdate?.validate(name: "\(name).icebergDestinationUpdate") try self.redshiftDestinationUpdate?.validate(name: "\(name).redshiftDestinationUpdate") try self.s3DestinationUpdate?.validate(name: "\(name).s3DestinationUpdate") try self.snowflakeDestinationUpdate?.validate(name: "\(name).snowflakeDestinationUpdate") @@ -4071,6 +4348,7 @@ extension Firehose { case elasticsearchDestinationUpdate = "ElasticsearchDestinationUpdate" case extendedS3DestinationUpdate = "ExtendedS3DestinationUpdate" case httpEndpointDestinationUpdate = "HttpEndpointDestinationUpdate" + case icebergDestinationUpdate = "IcebergDestinationUpdate" case redshiftDestinationUpdate = "RedshiftDestinationUpdate" case s3DestinationUpdate = "S3DestinationUpdate" case snowflakeDestinationUpdate = "SnowflakeDestinationUpdate" diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 036140a8c6..20e4f77d1d 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -949,7 +949,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -973,7 +973,7 @@ extension Glue { public func validate(name: String) throws { try self.column.forEach { - try validate($0, name: "column[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "column[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -1102,7 +1102,7 @@ extension Glue { try self.tableSchema?.forEach { try $0.validate(name: "\(name).tableSchema[]") } - try self.validate(self.tempDir, name: "tempDir", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.tempDir, name: "tempDir", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -1148,7 +1148,7 @@ extension Glue { public func validate(name: 
String) throws { try self.data?.validate(name: "\(name).data") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -1178,7 +1178,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -1211,7 +1211,7 @@ extension Glue { try self.mapping.forEach { try $0.validate(name: "\(name).mapping[]") } - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -1248,15 +1248,15 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$") - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } - try self.validate(self.schemaName, name: "schemaName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.schemaName, name: "schemaName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -1407,14 +1407,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, 
pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -2694,15 +2694,15 @@ extension Glue { public func validate(name: String) throws { try self.additionalDeltaOptions?.forEach { - try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -2762,15 +2762,15 @@ extension Glue { public func validate(name: String) throws { try self.additionalHudiOptions?.forEach { - try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -2830,11 +2830,11 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.dataPreviewOptions?.validate(name: "\(name).dataPreviewOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.streamingOptions?.validate(name: "\(name).streamingOptions") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.windowSize, name: "windowSize", parent: name, min: 0) } @@ -2876,11 +2876,11 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.dataPreviewOptions?.validate(name: "\(name).dataPreviewOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.streamingOptions?.validate(name: "\(name).streamingOptions") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.windowSize, name: "windowSize", parent: name, min: 0) } @@ -2927,9 +2927,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -3828,6 +3828,36 @@ extension Glue { } } + public struct ConditionExpression: AWSEncodableShape & AWSDecodableShape { + /// The condition of the condition expression. + public let condition: String + /// The target column of the condition expressions. + public let targetColumn: String + /// The value of the condition expression. + public let value: String? + + public init(condition: String, targetColumn: String, value: String? = nil) { + self.condition = condition + self.targetColumn = targetColumn + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.condition, name: "condition", parent: name, max: 128) + try self.validate(self.condition, name: "condition", parent: name, min: 1) + try self.validate(self.condition, name: "condition", parent: name, pattern: "^[A-Z\\_]+$") + try self.validate(self.targetColumn, name: "targetColumn", parent: name, max: 1024) + try self.validate(self.targetColumn, name: "targetColumn", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case condition = "Condition" + case targetColumn = "TargetColumn" + case value = "Value" + } + } + public struct ConfigurationObject: AWSEncodableShape & AWSDecodableShape { /// A list of allowed values for the parameter. public let allowedValues: [String]? 
@@ -4072,8 +4102,8 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -4106,13 +4136,13 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs?.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -6301,13 +6331,13 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.className, name: "className", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.className, name: "className", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.code, name: "code", parent: name, pattern: "^[\\s\\S]*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -6362,7 +6392,7 @@ extension Glue { public func validate(name: String) throws { try self.validate(self.evaluationContext, name: "evaluationContext", parent: name, pattern: "^[A-Za-z0-9_-]*$") - try self.validate(self.resultsS3Prefix, name: "resultsS3Prefix", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.resultsS3Prefix, name: "resultsS3Prefix", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -8231,11 +8261,11 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -8270,7 +8300,7 @@ extension Glue { public func validate(name: String) throws { try self.dataPreviewOptions?.validate(name: "\(name).dataPreviewOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.streamingOptions?.validate(name: "\(name).streamingOptions") try self.validate(self.windowSize, name: "windowSize", parent: name, min: 0) } @@ -8306,7 +8336,7 @@ extension Glue { public func validate(name: String) throws { try self.dataPreviewOptions?.validate(name: "\(name).dataPreviewOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.streamingOptions?.validate(name: "\(name).streamingOptions") try self.validate(self.windowSize, name: "windowSize", parent: name, min: 0) } @@ -8338,8 +8368,8 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -8400,7 +8430,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: 
"inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -8430,7 +8460,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -8463,7 +8493,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.nullTextList?.forEach { try $0.validate(name: "\(name).nullTextList[]") } @@ -8508,22 +8538,22 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.functionName, name: "functionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.functionName, name: "functionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.parameters?.forEach { try $0.validate(name: "\(name).parameters[]") } - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.transformName, name: "transformName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.version, name: "version", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.transformName, name: "transformName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.version, name: "version", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -8553,9 +8583,9 @@ extension Glue { } public func validate(name: String) throws { - try 
self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -8723,7 +8753,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.publishingOptions?.validate(name: "\(name).publishingOptions") try self.validate(self.ruleset, name: "ruleset", parent: name, max: 65536) try self.validate(self.ruleset, name: "ruleset", parent: name, min: 1) @@ -8768,14 +8798,14 @@ extension Glue { public func validate(name: String) throws { try self.additionalDataSources?.forEach { - try validate($0.key, name: "additionalDataSources.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try validate($0.value, name: "additionalDataSources[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalDataSources.key", parent: name, pattern: "^([^\\r\\n])*$") + try validate($0.value, name: "additionalDataSources[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.publishingOptions?.validate(name: "\(name).publishingOptions") try self.validate(self.ruleset, name: "ruleset", parent: name, max: 65536) try self.validate(self.ruleset, name: "ruleset", parent: name, min: 1) @@ -8925,14 +8955,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.filledPath, name: "filledPath", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.imputedPath, name: "imputedPath", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.filledPath, name: "filledPath", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try 
self.validate(self.imputedPath, name: "imputedPath", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -8969,7 +8999,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -9020,7 +9050,7 @@ extension Glue { public func validate(name: String) throws { try self.value.forEach { - try validate($0, name: "value[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "value[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -12829,10 +12859,10 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.partitionPredicate, name: "partitionPredicate", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.partitionPredicate, name: "partitionPredicate", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -12868,14 +12898,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try 
self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -13068,14 +13098,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.filterPredicate, name: "filterPredicate", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.filterPredicate, name: "filterPredicate", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.jobBookmarkKeys?.forEach { - try validate($0, name: "jobBookmarkKeys[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "jobBookmarkKeys[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.jobBookmarkKeysSortOrder, name: "jobBookmarkKeysSortOrder", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.jobBookmarkKeysSortOrder, name: "jobBookmarkKeysSortOrder", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.lowerBound, name: "lowerBound", parent: name, min: 0) try self.validate(self.numPartitions, name: "numPartitions", parent: name, min: 0) - try self.validate(self.partitionColumn, name: "partitionColumn", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.partitionColumn, name: "partitionColumn", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.upperBound, name: "upperBound", parent: name, min: 0) } @@ -13122,11 +13152,11 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.validate(name: "\(name).additionalOptions") - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$") - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -13176,19 +13206,19 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$") - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionTable, name: "connectionTable", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try 
$0.validate(name: "\(name).outputSchemas[]") } @@ -13796,7 +13826,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 2) try self.validate(self.inputs, name: "inputs", parent: name, min: 2) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -13819,7 +13849,7 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.from, name: "from", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.from, name: "from", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -13921,23 +13951,23 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.addRecordTimestamp, name: "addRecordTimestamp", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.assign, name: "assign", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.bootstrapServers, name: "bootstrapServers", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.classification, name: "classification", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.delimiter, name: "delimiter", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.emitConsumerLagMetrics, name: "emitConsumerLagMetrics", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.endingOffsets, name: "endingOffsets", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.addRecordTimestamp, name: "addRecordTimestamp", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.assign, name: "assign", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.bootstrapServers, name: "bootstrapServers", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.classification, name: "classification", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.delimiter, name: "delimiter", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.emitConsumerLagMetrics, name: 
"emitConsumerLagMetrics", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.endingOffsets, name: "endingOffsets", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.maxOffsetsPerTrigger, name: "maxOffsetsPerTrigger", parent: name, min: 0) try self.validate(self.minPartitions, name: "minPartitions", parent: name, min: 0) try self.validate(self.numRetries, name: "numRetries", parent: name, min: 0) try self.validate(self.pollTimeoutMs, name: "pollTimeoutMs", parent: name, min: 0) try self.validate(self.retryIntervalMs, name: "retryIntervalMs", parent: name, min: 0) - try self.validate(self.securityProtocol, name: "securityProtocol", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.startingOffsets, name: "startingOffsets", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.subscribePattern, name: "subscribePattern", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.topicName, name: "topicName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.securityProtocol, name: "securityProtocol", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.startingOffsets, name: "startingOffsets", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.subscribePattern, name: "subscribePattern", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.topicName, name: "topicName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -14050,12 +14080,12 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.addRecordTimestamp, name: "addRecordTimestamp", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.classification, name: "classification", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.delimiter, name: "delimiter", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.addRecordTimestamp, name: "addRecordTimestamp", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.classification, name: "classification", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.delimiter, name: "delimiter", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.describeShardInterval, name: "describeShardInterval", parent: name, min: 0) - try self.validate(self.emitConsumerLagMetrics, name: "emitConsumerLagMetrics", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.endpointUrl, name: "endpointUrl", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.emitConsumerLagMetrics, name: "emitConsumerLagMetrics", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.endpointUrl, name: "endpointUrl", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.idleTimeBetweenReadsInMs, name: "idleTimeBetweenReadsInMs", parent: name, min: 0) try self.validate(self.maxFetchRecordsPerShard, name: "maxFetchRecordsPerShard", parent: name, min: 0) try self.validate(self.maxFetchTimeInMs, name: "maxFetchTimeInMs", parent: name, min: 0) @@ -14063,10 +14093,10 @@ extension Glue { try self.validate(self.maxRetryIntervalMs, name: "maxRetryIntervalMs", parent: name, min: 0) try self.validate(self.numRetries, name: "numRetries", parent: name, min: 0) try self.validate(self.retryIntervalMs, name: "retryIntervalMs", parent: name, min: 0) - try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.roleSessionName, name: "roleSessionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.streamArn, name: "streamArn", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.streamName, name: "streamName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.roleSessionName, name: "roleSessionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.streamArn, name: "streamArn", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.streamName, name: "streamName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15421,11 +15451,11 @@ extension Glue { try $0.validate(name: "\(name).children[]") } try self.fromPath?.forEach { - try validate($0, name: "fromPath[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "fromPath[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.fromType, name: "fromType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.toKey, name: "toKey", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.toType, name: "toType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.fromType, 
name: "fromType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.toKey, name: "toKey", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.toType, name: "toType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15494,7 +15524,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 2) try self.validate(self.inputs, name: "inputs", parent: name, min: 2) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.validate(self.source, name: "source", parent: name, pattern: "^[A-Za-z0-9_-]*$") } @@ -15589,9 +15619,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15619,14 +15649,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15673,9 +15703,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try 
self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15703,14 +15733,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15805,7 +15835,7 @@ extension Glue { public func validate(name: String) throws { try self.datatype.validate(name: "\(name).datatype") - try self.validate(self.value, name: "value", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.value, name: "value", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15937,9 +15967,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.description, name: "description", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.label, name: "label", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.value, name: "value", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.description, name: "description", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.label, name: "label", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.value, name: "value", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15964,9 +15994,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -15994,14 +16024,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -16085,7 +16115,7 @@ extension Glue { public func validate(name: String) throws { try self.entityTypesToDetect.forEach { - try validate($0, name: "entityTypesToDetect[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "entityTypesToDetect[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") @@ -16094,8 +16124,8 @@ extension Glue { try self.validate(self.inputs, name: "inputs", parent: name, min: 1) try self.validate(self.maskValue, name: "maskValue", parent: name, max: 256) try self.validate(self.maskValue, name: "maskValue", parent: name, pattern: "^[*A-Za-z0-9_-]*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.outputColumnName, name: "outputColumnName", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.outputColumnName, name: "outputColumnName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.sampleFraction, name: "sampleFraction", parent: name, max: 1.0) try self.validate(self.sampleFraction, name: "sampleFraction", parent: name, min: 0.0) try self.validate(self.thresholdFraction, name: "thresholdFraction", parent: name, max: 1.0) @@ -16342,9 +16372,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -16372,14 +16402,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -16819,12 +16849,15 @@ extension Glue { /// The name of the Glue Studio node. public let name: String /// A reference to the DataBrew recipe used by the node. - public let recipeReference: RecipeReference + public let recipeReference: RecipeReference? + /// Transform steps used in the recipe node. + public let recipeSteps: [RecipeStep]? - public init(inputs: [String], name: String, recipeReference: RecipeReference) { + public init(inputs: [String], name: String, recipeReference: RecipeReference? = nil, recipeSteps: [RecipeStep]? 
= nil) { self.inputs = inputs self.name = name self.recipeReference = recipeReference + self.recipeSteps = recipeSteps } public func validate(name: String) throws { @@ -16833,14 +16866,48 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.recipeReference.validate(name: "\(name).recipeReference") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.recipeReference?.validate(name: "\(name).recipeReference") + try self.recipeSteps?.forEach { + try $0.validate(name: "\(name).recipeSteps[]") + } } private enum CodingKeys: String, CodingKey { case inputs = "Inputs" case name = "Name" case recipeReference = "RecipeReference" + case recipeSteps = "RecipeSteps" + } + } + + public struct RecipeAction: AWSEncodableShape & AWSDecodableShape { + /// The operation of the recipe action. + public let operation: String + /// The parameters of the recipe action. + public let parameters: [String: String]? + + public init(operation: String, parameters: [String: String]? = nil) { + self.operation = operation + self.parameters = parameters + } + + public func validate(name: String) throws { + try self.validate(self.operation, name: "operation", parent: name, max: 128) + try self.validate(self.operation, name: "operation", parent: name, min: 1) + try self.validate(self.operation, name: "operation", parent: name, pattern: "^[A-Z\\_]+$") + try self.parameters?.forEach { + try validate($0.key, name: "parameters.key", parent: name, max: 128) + try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[A-Za-z0-9]+$") + try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, max: 32768) + try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case operation = "Operation" + case parameters = "Parameters" } } @@ -16856,7 +16923,7 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.recipeArn, name: "recipeArn", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.recipeArn, name: "recipeArn", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.recipeVersion, name: "recipeVersion", parent: name, max: 16) try self.validate(self.recipeVersion, name: "recipeVersion", parent: name, min: 1) } @@ -16867,6 +16934,30 @@ extension Glue { } } + public struct RecipeStep: AWSEncodableShape & AWSDecodableShape { + /// The transformation action of the recipe step. + public let action: RecipeAction + /// The condition expressions for the recipe step. + public let conditionExpressions: [ConditionExpression]? + + public init(action: RecipeAction, conditionExpressions: [ConditionExpression]? 
= nil) { + self.action = action + self.conditionExpressions = conditionExpressions + } + + public func validate(name: String) throws { + try self.action.validate(name: "\(name).action") + try self.conditionExpressions?.forEach { + try $0.validate(name: "\(name).conditionExpressions[]") + } + } + + private enum CodingKeys: String, CodingKey { + case action = "Action" + case conditionExpressions = "ConditionExpressions" + } + } + public struct RecrawlPolicy: AWSEncodableShape & AWSDecodableShape { /// Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. A value of CRAWL_EVERYTHING specifies crawling the entire dataset again. A value of CRAWL_NEW_FOLDERS_ONLY specifies crawling only folders that were added since the last crawler run. A value of CRAWL_EVENT_MODE specifies crawling only the changes identified by Amazon S3 events. public let recrawlBehavior: RecrawlBehavior? @@ -16901,11 +16992,11 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.tmpDirIAMRole, name: "tmpDirIAMRole", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.tmpDirIAMRole, name: "tmpDirIAMRole", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -16944,16 +17035,16 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: 
name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.tmpDirIAMRole, name: "tmpDirIAMRole", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.redshiftTmpDir, name: "redshiftTmpDir", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.tmpDirIAMRole, name: "tmpDirIAMRole", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.upsertRedshiftOptions?.validate(name: "\(name).upsertRedshiftOptions") } @@ -17087,9 +17178,9 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17197,12 +17288,12 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.sourcePath.forEach { - try validate($0, name: "sourcePath[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "sourcePath[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.targetPath.forEach { - try validate($0, name: "targetPath[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "targetPath[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -17410,15 +17501,15 @@ extension Glue { public func validate(name: String) throws { try self.additionalDeltaOptions?.forEach { - try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: 
"additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17452,15 +17543,15 @@ extension Glue { public func validate(name: String) throws { try self.additionalHudiOptions?.forEach { - try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17493,10 +17584,10 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.partitionPredicate, name: "partitionPredicate", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.partitionPredicate, name: "partitionPredicate", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17532,14 +17623,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17616,20 +17707,20 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.validate(name: "\(name).additionalOptions") - try self.validate(self.escaper, name: "escaper", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$") + try self.validate(self.escaper, name: "escaper", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.exclusions?.forEach { - try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.maxBand, name: "maxBand", parent: name, min: 0) try self.validate(self.maxFilesInBand, name: "maxFilesInBand", parent: name, min: 0) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.paths.forEach { - try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -17684,17 +17775,17 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17739,16 +17830,16 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: 
"additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.schemaChangePolicy?.validate(name: "\(name).schemaChangePolicy") } @@ -17786,16 +17877,16 @@ extension Glue { public func validate(name: String) throws { try self.additionalDeltaOptions?.forEach { - try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalDeltaOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalDeltaOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.additionalOptions?.validate(name: "\(name).additionalOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.paths.forEach { - try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -17826,7 +17917,7 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.samplePath, name: "samplePath", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.samplePath, name: "samplePath", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -17864,14 +17955,14 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.compression, name: "compression", parent: name, 
pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.compression, name: "compression", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.schemaChangePolicy?.validate(name: "\(name).schemaChangePolicy") } @@ -17936,8 +18027,8 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.schemaChangePolicy?.validate(name: "\(name).schemaChangePolicy") } @@ -17979,17 +18070,17 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.database, name: "database", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.table, name: "table", parent: name, pattern: 
"^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.table, name: "table", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -18034,16 +18125,16 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.schemaChangePolicy?.validate(name: "\(name).schemaChangePolicy") } @@ -18081,16 +18172,16 @@ extension Glue { public func validate(name: String) throws { try self.additionalHudiOptions?.forEach { - try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalHudiOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalHudiOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.additionalOptions?.validate(name: "\(name).additionalOptions") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.paths.forEach { - try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "paths[]", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -18150,19 +18241,19 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.validate(name: "\(name).additionalOptions") try self.exclusions?.forEach { - try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.jsonPath, name: "jsonPath", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.jsonPath, name: "jsonPath", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.maxBand, name: "maxBand", parent: name, min: 0) try self.validate(self.maxFilesInBand, name: "maxFilesInBand", parent: name, min: 0) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.paths.forEach { - try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -18224,18 +18315,18 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.validate(name: "\(name).additionalOptions") try self.exclusions?.forEach { - try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "exclusions[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.groupFiles, name: "groupFiles", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.groupSize, name: "groupSize", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.maxBand, name: "maxBand", parent: name, min: 0) try self.validate(self.maxFilesInBand, name: "maxFilesInBand", parent: name, min: 0) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } try self.paths.forEach { - try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "paths[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -18665,7 +18756,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -18696,7 +18787,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -18954,8 +19045,8 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } try self.connection?.validate(name: "\(name).connection") try self.iamRole?.validate(name: "\(name).iamRole") @@ -18969,7 +19060,7 @@ extension Glue { try self.tableSchema?.forEach { try $0.validate(name: "\(name).tableSchema[]") } - try self.validate(self.tempDir, name: "tempDir", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.tempDir, name: "tempDir", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } private enum CodingKeys: String, CodingKey { @@ -19013,7 +19104,7 @@ extension Glue { public func validate(name: String) throws { try self.data.validate(name: "\(name).data") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", 
parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -19047,7 +19138,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -19159,13 +19250,13 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -19209,18 +19300,18 @@ extension Glue { public func validate(name: String) throws { try self.additionalOptions?.forEach { - try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0.key, name: "additionalOptions.key", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try validate($0.value, name: "additionalOptions[\"\($0.key)\"]", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectionType, name: "connectionType", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.connectorName, name: "connectorName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.inputs.forEach { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -19263,7 +19354,7 @@ extension Glue { try validate($0, name: "inputs[]", parent: name, pattern: "^[A-Za-z0-9_-]*$") } try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") try self.outputSchemas?.forEach { try $0.validate(name: "\(name).outputSchemas[]") } @@ -19308,8 +19399,8 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") - try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") + try self.validate(self.path, name: "path", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.prob, name: "prob", parent: name, max: 1.0) try self.validate(self.prob, name: "prob", parent: name, min: 0.0) try self.validate(self.topk, name: "topk", parent: name, max: 100) @@ -19345,7 +19436,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 1) try self.validate(self.inputs, name: "inputs", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: 
"^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -19367,7 +19458,7 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.alias, name: "alias", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$") + try self.validate(self.alias, name: "alias", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.validate(self.from, name: "from", parent: name, pattern: "^[A-Za-z0-9_-]*$") } @@ -20975,11 +21066,11 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.validationMessage, name: "validationMessage", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.validationRule, name: "validationRule", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.validationMessage, name: "validationMessage", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.validationRule, name: "validationRule", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.value?.forEach { - try validate($0, name: "value[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "value[]", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } @@ -21263,7 +21354,7 @@ extension Glue { } try self.validate(self.inputs, name: "inputs", parent: name, max: 2) try self.validate(self.inputs, name: "inputs", parent: name, min: 2) - try self.validate(self.name, name: "name", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^([^\\r\\n])*$") } private enum CodingKeys: String, CodingKey { @@ -22663,10 +22754,10 @@ extension Glue { } public func validate(name: String) throws { - try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") - try self.validate(self.tableLocation, name: "tableLocation", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") + try self.validate(self.tableLocation, name: "tableLocation", parent: name, pattern: "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") try self.upsertKeys?.forEach { - try validate($0, name: "upsertKeys[]", parent: name, pattern: "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$") + try validate($0, name: "upsertKeys[]", parent: name, pattern: 
"^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$") } } diff --git a/Sources/Soto/Services/Ivschat/Ivschat_api.swift b/Sources/Soto/Services/Ivschat/Ivschat_api.swift index 7c1cc0b3ae..c34d7fabd6 100644 --- a/Sources/Soto/Services/Ivschat/Ivschat_api.swift +++ b/Sources/Soto/Services/Ivschat/Ivschat_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS Ivschat service. /// -/// Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Key Concepts LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room. Room — The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user’s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it’s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. 
For more information: Authentication and generating signatures — See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions — See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference. Messaging Endpoints DeleteMessage — Sends an event to a specific room which directs clients to delete a specific message; that is, unrender it from view and delete it from the client’s chat history. This event’s EventName is aws:DELETE_MESSAGE. This replicates the DeleteMessage WebSocket operation in the Amazon IVS Chat Messaging API. DisconnectUser — Disconnects all connections using a specified user ID from a room. This replicates the DisconnectUser WebSocket operation in the Amazon IVS Chat Messaging API. SendEvent — Sends an event to a room. Use this within your application’s business logic to send events to clients of a room; e.g., to notify clients to change the way the chat UI is rendered. Chat Token Endpoint CreateChatToken — Creates an encrypted token that is used by a chat participant to establish an individual WebSocket chat connection to a room. When the token is used to connect to chat, the connection is valid for the session duration specified in the request. The token becomes invalid at the token-expiration timestamp included in the response. Room Endpoints CreateRoom — Creates a room that allows clients to connect and pass messages. DeleteRoom — Deletes the specified room. GetRoom — Gets the specified room. ListRooms — Gets summary information about all your rooms in the AWS region where the API request is processed. UpdateRoom — Updates a room’s configuration. Logging Configuration Endpoints CreateLoggingConfiguration — Creates a logging configuration that allows clients to store and record sent messages. DeleteLoggingConfiguration — Deletes the specified logging configuration. GetLoggingConfiguration — Gets the specified logging configuration. ListLoggingConfigurations — Gets summary information about all your logging configurations in the AWS region where the API request is processed. UpdateLoggingConfiguration — Updates a specified logging configuration. Tags Endpoints ListTagsForResource — Gets information about AWS tags for the specified ARN. TagResource — Adds or updates tags for the AWS resource with the specified ARN. UntagResource — Removes tags from the resource with the specified ARN. All the above are HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. +/// Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. 
We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Resources The following resources are part of Amazon IVS Chat: LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information. Room — The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user’s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it’s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. For more information: Authentication and generating signatures — See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions — See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference. 
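For orientation, the chat-token flow described in the comment above maps onto the generated Swift client roughly as follows. This is a minimal sketch rather than code from the diff: it assumes the generated Ivschat service and its CreateChatToken shapes follow the usual Soto conventions, and the room identifier and user ID shown are placeholders.

import SotoIvschat

// Sketch only: mint a chat token for one viewer session (run inside an async context).
// The caller's IAM role is assumed to allow ivschat:CreateChatToken.
let client = AWSClient()
let ivschat = Ivschat(client: client, region: .useast1)

let response = try await ivschat.createChatToken(
    .init(
        roomIdentifier: "arn:aws:ivschat:us-east-1:123456789012:room/EXAMPLE", // hypothetical room ARN
        userId: "viewer-123"                                                   // application-defined user ID
    )
)
print(response.token ?? "no token returned")
try await client.shutdown() // release the client's resources when finished

The returned token, together with the session and token expiration timestamps in the response, is handed to the client application, which uses it to open the WebSocket connection described in the Amazon IVS Chat Messaging API Reference.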
public struct Ivschat: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_api.swift b/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_api.swift index 73e3c69d30..c5ef790b7e 100644 --- a/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_api.swift +++ b/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_api.swift @@ -83,7 +83,34 @@ public struct LicenseManagerLinuxSubscriptions: AWSService { // MARK: API Calls - /// Lists the Linux subscriptions service settings. + /// Remove a third-party subscription provider from the Bring Your Own License (BYOL) subscriptions + /// registered to your account. + @Sendable + public func deregisterSubscriptionProvider(_ input: DeregisterSubscriptionProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeregisterSubscriptionProviderResponse { + return try await self.client.execute( + operation: "DeregisterSubscriptionProvider", + path: "/subscription/DeregisterSubscriptionProvider", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Get details for a Bring Your Own License (BYOL) subscription that's registered to your account. + @Sendable + public func getRegisteredSubscriptionProvider(_ input: GetRegisteredSubscriptionProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRegisteredSubscriptionProviderResponse { + return try await self.client.execute( + operation: "GetRegisteredSubscriptionProvider", + path: "/subscription/GetRegisteredSubscriptionProvider", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the Linux subscriptions service settings for your account. @Sendable public func getServiceSettings(_ input: GetServiceSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetServiceSettingsResponse { return try await self.client.execute( @@ -122,6 +149,72 @@ public struct LicenseManagerLinuxSubscriptions: AWSService { ) } + /// List Bring Your Own License (BYOL) subscription registration resources for your account. + @Sendable + public func listRegisteredSubscriptionProviders(_ input: ListRegisteredSubscriptionProvidersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRegisteredSubscriptionProvidersResponse { + return try await self.client.execute( + operation: "ListRegisteredSubscriptionProviders", + path: "/subscription/ListRegisteredSubscriptionProviders", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// List the metadata tags that are assigned to the + /// specified Amazon Web Services resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{resourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Register the supported third-party subscription provider for your Bring Your Own License (BYOL) subscription. 
+    @Sendable
+    public func registerSubscriptionProvider(_ input: RegisterSubscriptionProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterSubscriptionProviderResponse {
+        return try await self.client.execute(
+            operation: "RegisterSubscriptionProvider",
+            path: "/subscription/RegisterSubscriptionProvider",
+            httpMethod: .POST,
+            serviceConfig: self.config,
+            input: input,
+            logger: logger
+        )
+    }
+
+    /// Add metadata tags to the specified Amazon Web Services resource.
+    @Sendable
+    public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse {
+        return try await self.client.execute(
+            operation: "TagResource",
+            path: "/tags/{resourceArn}",
+            httpMethod: .PUT,
+            serviceConfig: self.config,
+            input: input,
+            logger: logger
+        )
+    }
+
+    /// Remove one or more metadata tags from the specified Amazon Web Services resource.
+    @Sendable
+    public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse {
+        return try await self.client.execute(
+            operation: "UntagResource",
+            path: "/tags/{resourceArn}",
+            httpMethod: .DELETE,
+            serviceConfig: self.config,
+            input: input,
+            logger: logger
+        )
+    }
+
    /// Updates the service settings for Linux subscriptions.
    @Sendable
    public func updateServiceSettings(_ input: UpdateServiceSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateServiceSettingsResponse {
@@ -186,6 +279,25 @@ extension LicenseManagerLinuxSubscriptions {
            logger: logger
        )
    }
+
+    /// List Bring Your Own License (BYOL) subscription registration resources for your account.
+    /// Return PaginatorSequence for operation.
+    ///
+    /// - Parameters:
+    ///   - input: Input for request
+    ///   - logger: Logger used for logging
+    public func listRegisteredSubscriptionProvidersPaginator(
+        _ input: ListRegisteredSubscriptionProvidersRequest,
+        logger: Logger = AWSClient.loggingDisabled
+    ) -> AWSClient.PaginatorSequence<ListRegisteredSubscriptionProvidersRequest, ListRegisteredSubscriptionProvidersResponse> {
+        return .init(
+            input: input,
+            command: self.listRegisteredSubscriptionProviders,
+            inputKey: \ListRegisteredSubscriptionProvidersRequest.nextToken,
+            outputKey: \ListRegisteredSubscriptionProvidersResponse.nextToken,
+            logger: logger
+        )
+    }
}

extension LicenseManagerLinuxSubscriptions.ListLinuxSubscriptionInstancesRequest: AWSPaginateToken {
@@ -207,3 +319,13 @@ extension LicenseManagerLinuxSubscriptions.ListLinuxSubscriptionsRequest: AWSPag
        )
    }
}
+
+extension LicenseManagerLinuxSubscriptions.ListRegisteredSubscriptionProvidersRequest: AWSPaginateToken {
+    public func usingPaginationToken(_ token: String) -> LicenseManagerLinuxSubscriptions.ListRegisteredSubscriptionProvidersRequest {
+        return .init(
+            maxResults: self.maxResults,
+            nextToken: token,
+            subscriptionProviderSources: self.subscriptionProviderSources
+        )
+    }
+}
diff --git a/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_shapes.swift b/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_shapes.swift
index 305533a892..247bdb1f78 100644
--- a/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_shapes.swift
+++ b/Sources/Soto/Services/LicenseManagerLinuxSubscriptions/LicenseManagerLinuxSubscriptions_shapes.swift
@@ -64,8 +64,45 @@ extension LicenseManagerLinuxSubscriptions {
        public var description: String { return self.rawValue }
    }

+    public enum SubscriptionProviderSource: String, CustomStringConvertible,
Codable, Sendable, CodingKeyRepresentable { + /// RedHat subscription provider namespace + case redHat = "RedHat" + public var description: String { return self.rawValue } + } + + public enum SubscriptionProviderStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + /// ACTIVE status + case active = "ACTIVE" + /// INVALID status + case invalid = "INVALID" + /// PENDING status + case pending = "PENDING" + public var description: String { return self.rawValue } + } + // MARK: Shapes + public struct DeregisterSubscriptionProviderRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the subscription provider resource to deregister. + public let subscriptionProviderArn: String + + public init(subscriptionProviderArn: String) { + self.subscriptionProviderArn = subscriptionProviderArn + } + + public func validate(name: String) throws { + try self.validate(self.subscriptionProviderArn, name: "subscriptionProviderArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$") + } + + private enum CodingKeys: String, CodingKey { + case subscriptionProviderArn = "SubscriptionProviderArn" + } + } + + public struct DeregisterSubscriptionProviderResponse: AWSDecodableShape { + public init() {} + } + public struct Filter: AWSEncodableShape { /// The type of name to filter by. public let name: String? @@ -92,6 +129,60 @@ extension LicenseManagerLinuxSubscriptions { } } + public struct GetRegisteredSubscriptionProviderRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the BYOL registration resource to get details for. + public let subscriptionProviderArn: String + + public init(subscriptionProviderArn: String) { + self.subscriptionProviderArn = subscriptionProviderArn + } + + public func validate(name: String) throws { + try self.validate(self.subscriptionProviderArn, name: "subscriptionProviderArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$") + } + + private enum CodingKeys: String, CodingKey { + case subscriptionProviderArn = "SubscriptionProviderArn" + } + } + + public struct GetRegisteredSubscriptionProviderResponse: AWSDecodableShape { + /// The timestamp from the last time License Manager retrieved subscription details + /// from your registered third-party Linux subscription provider. + public let lastSuccessfulDataRetrievalTime: String? + /// The Amazon Resource Name (ARN) of the third-party access secret stored in Secrets Manager for the BYOL + /// registration resource specified in the request. + public let secretArn: String? + /// The Amazon Resource Name (ARN) for the BYOL registration resource specified in the request. + public let subscriptionProviderArn: String? + /// The subscription provider for the BYOL registration resource specified + /// in the request. + public let subscriptionProviderSource: SubscriptionProviderSource? + /// The status of the Linux subscription provider access token from the last + /// successful subscription data request. + public let subscriptionProviderStatus: SubscriptionProviderStatus? + /// The detailed message from your subscription provider token status. + public let subscriptionProviderStatusMessage: String? + + public init(lastSuccessfulDataRetrievalTime: String? = nil, secretArn: String? = nil, subscriptionProviderArn: String? = nil, subscriptionProviderSource: SubscriptionProviderSource? 
= nil, subscriptionProviderStatus: SubscriptionProviderStatus? = nil, subscriptionProviderStatusMessage: String? = nil) { + self.lastSuccessfulDataRetrievalTime = lastSuccessfulDataRetrievalTime + self.secretArn = secretArn + self.subscriptionProviderArn = subscriptionProviderArn + self.subscriptionProviderSource = subscriptionProviderSource + self.subscriptionProviderStatus = subscriptionProviderStatus + self.subscriptionProviderStatusMessage = subscriptionProviderStatusMessage + } + + private enum CodingKeys: String, CodingKey { + case lastSuccessfulDataRetrievalTime = "LastSuccessfulDataRetrievalTime" + case secretArn = "SecretArn" + case subscriptionProviderArn = "SubscriptionProviderArn" + case subscriptionProviderSource = "SubscriptionProviderSource" + case subscriptionProviderStatus = "SubscriptionProviderStatus" + case subscriptionProviderStatusMessage = "SubscriptionProviderStatusMessage" + } + } + public struct GetServiceSettingsRequest: AWSEncodableShape { public init() {} } @@ -130,46 +221,70 @@ extension LicenseManagerLinuxSubscriptions { public let accountID: String? /// The AMI ID used to launch the instance. public let amiId: String? + /// Indicates that you have two different license subscriptions for + /// the same software on your instance. + public let dualSubscription: String? /// The instance ID of the resource. public let instanceID: String? /// The instance type of the resource. public let instanceType: String? /// The time in which the last discovery updated the instance details. public let lastUpdatedTime: String? + /// The operating system software version that runs on your instance. + public let osVersion: String? /// The product code for the instance. For more information, see Usage operation values in the License Manager User Guide . public let productCode: [String]? /// The Region the instance is running in. public let region: String? + /// Indicates that your instance uses a BYOL license subscription from + /// a third-party Linux subscription provider that you've registered with License Manager. + public let registeredWithSubscriptionProvider: String? /// The status of the instance. public let status: String? - /// The name of the subscription being used by the instance. + /// The name of the license subscription that the instance uses. public let subscriptionName: String? + /// The timestamp when you registered the third-party Linux subscription + /// provider for the subscription that the instance uses. + public let subscriptionProviderCreateTime: String? + /// The timestamp from the last time that the instance synced with the registered + /// third-party Linux subscription provider. + public let subscriptionProviderUpdateTime: String? /// The usage operation of the instance. For more information, see For more information, see Usage operation values in the License Manager User Guide. public let usageOperation: String? - public init(accountID: String? = nil, amiId: String? = nil, instanceID: String? = nil, instanceType: String? = nil, lastUpdatedTime: String? = nil, productCode: [String]? = nil, region: String? = nil, status: String? = nil, subscriptionName: String? = nil, usageOperation: String? = nil) { + public init(accountID: String? = nil, amiId: String? = nil, dualSubscription: String? = nil, instanceID: String? = nil, instanceType: String? = nil, lastUpdatedTime: String? = nil, osVersion: String? = nil, productCode: [String]? = nil, region: String? = nil, registeredWithSubscriptionProvider: String? = nil, status: String? 
= nil, subscriptionName: String? = nil, subscriptionProviderCreateTime: String? = nil, subscriptionProviderUpdateTime: String? = nil, usageOperation: String? = nil) { self.accountID = accountID self.amiId = amiId + self.dualSubscription = dualSubscription self.instanceID = instanceID self.instanceType = instanceType self.lastUpdatedTime = lastUpdatedTime + self.osVersion = osVersion self.productCode = productCode self.region = region + self.registeredWithSubscriptionProvider = registeredWithSubscriptionProvider self.status = status self.subscriptionName = subscriptionName + self.subscriptionProviderCreateTime = subscriptionProviderCreateTime + self.subscriptionProviderUpdateTime = subscriptionProviderUpdateTime self.usageOperation = usageOperation } private enum CodingKeys: String, CodingKey { case accountID = "AccountID" case amiId = "AmiId" + case dualSubscription = "DualSubscription" case instanceID = "InstanceID" case instanceType = "InstanceType" case lastUpdatedTime = "LastUpdatedTime" + case osVersion = "OsVersion" case productCode = "ProductCode" case region = "Region" + case registeredWithSubscriptionProvider = "RegisteredWithSubscriptionProvider" case status = "Status" case subscriptionName = "SubscriptionName" + case subscriptionProviderCreateTime = "SubscriptionProviderCreateTime" + case subscriptionProviderUpdateTime = "SubscriptionProviderUpdateTime" case usageOperation = "UsageOperation" } } @@ -197,11 +312,12 @@ extension LicenseManagerLinuxSubscriptions { } public struct ListLinuxSubscriptionInstancesRequest: AWSEncodableShape { - /// An array of structures that you can use to filter the results to those that match one or more sets of key-value pairs that you specify. For example, you can filter by the name of AmiID with an optional operator to see subscriptions that match, partially match, or don't match a certain Amazon Machine Image (AMI) ID. The valid names for this filter are: AmiID InstanceID AccountID Status Region UsageOperation ProductCode InstanceType The valid Operators for this filter are: contains equals Notequal + /// An array of structures that you can use to filter the results by your specified criteria. For example, you can specify Region in the Name, with the contains operator to list all subscriptions that match a partial string in the Value, such as us-west. For each filter, you can specify one of the following values for the Name key to streamline results: AccountID AmiID DualSubscription InstanceID InstanceType ProductCode Region Status UsageOperation For each filter, you can use one of the following Operator values to define the behavior of the filter: contains equals Notequal public let filters: [Filter]? - /// Maximum number of results to return in a single call. + /// The maximum items to return in a request. public let maxResults: Int? - /// Token for the next set of results. + /// A token to specify where to start paginating. This + /// is the nextToken from a previously truncated response. public let nextToken: String? public init(filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { @@ -226,7 +342,10 @@ extension LicenseManagerLinuxSubscriptions { public struct ListLinuxSubscriptionInstancesResponse: AWSDecodableShape { /// An array that contains instance objects. public let instances: [Instance]? - /// Token for the next set of results. + /// The next token used for paginated responses. When this + /// field isn't empty, there are additional elements that the service hasn't + /// included in this request. 
Use this token with the next request to retrieve + /// additional objects. public let nextToken: String? public init(instances: [Instance]? = nil, nextToken: String? = nil) { @@ -243,9 +362,10 @@ extension LicenseManagerLinuxSubscriptions { public struct ListLinuxSubscriptionsRequest: AWSEncodableShape { /// An array of structures that you can use to filter the results to those that match one or more sets of key-value pairs that you specify. For example, you can filter by the name of Subscription with an optional operator to see subscriptions that match, partially match, or don't match a certain subscription's name. The valid names for this filter are: Subscription The valid Operators for this filter are: contains equals Notequal public let filters: [Filter]? - /// Maximum number of results to return in a single call. + /// The maximum items to return in a request. public let maxResults: Int? - /// Token for the next set of results. + /// A token to specify where to start paginating. This + /// is the nextToken from a previously truncated response. public let nextToken: String? public init(filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { @@ -268,7 +388,10 @@ extension LicenseManagerLinuxSubscriptions { } public struct ListLinuxSubscriptionsResponse: AWSDecodableShape { - /// Token for the next set of results. + /// The next token used for paginated responses. When this + /// field isn't empty, there are additional elements that the service hasn't + /// included in this request. Use this token with the next request to retrieve + /// additional objects. public let nextToken: String? /// An array that contains subscription objects. public let subscriptions: [Subscription]? @@ -284,6 +407,174 @@ extension LicenseManagerLinuxSubscriptions { } } + public struct ListRegisteredSubscriptionProvidersRequest: AWSEncodableShape { + /// The maximum items to return in a request. + public let maxResults: Int? + /// A token to specify where to start paginating. This + /// is the nextToken from a previously truncated response. + public let nextToken: String? + /// To filter your results, specify which subscription providers to return + /// in the list. + public let subscriptionProviderSources: [SubscriptionProviderSource]? + + public init(maxResults: Int? = nil, nextToken: String? = nil, subscriptionProviderSources: [SubscriptionProviderSource]? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.subscriptionProviderSources = subscriptionProviderSources + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case subscriptionProviderSources = "SubscriptionProviderSources" + } + } + + public struct ListRegisteredSubscriptionProvidersResponse: AWSDecodableShape { + /// The next token used for paginated responses. When this + /// field isn't empty, there are additional elements that the service hasn't + /// included in this request. Use this token with the next request to retrieve + /// additional objects. + public let nextToken: String? + /// The list of BYOL registration resources that fit the criteria + /// you specified in the request. + public let registeredSubscriptionProviders: [RegisteredSubscriptionProvider]? + + public init(nextToken: String? = nil, registeredSubscriptionProviders: [RegisteredSubscriptionProvider]? 
= nil) { + self.nextToken = nextToken + self.registeredSubscriptionProviders = registeredSubscriptionProviders + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case registeredSubscriptionProviders = "RegisteredSubscriptionProviders" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource for which to list metadata tags. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The metadata tags for the requested resource. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct RegisterSubscriptionProviderRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the secret where you've stored your subscription provider's access token. For + /// RHEL subscriptions managed through the Red Hat Subscription Manager (RHSM), the secret contains + /// your Red Hat Offline token. + public let secretArn: String + /// The supported Linux subscription provider to register. + public let subscriptionProviderSource: SubscriptionProviderSource + /// The metadata tags to assign to your registered Linux subscription provider + /// resource. + public let tags: [String: String]? + + public init(secretArn: String, subscriptionProviderSource: SubscriptionProviderSource, tags: [String: String]? = nil) { + self.secretArn = secretArn + self.subscriptionProviderSource = subscriptionProviderSource + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.secretArn, name: "secretArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:secretsmanager:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:secret:[^/]{1,1023}$") + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case secretArn = "SecretArn" + case subscriptionProviderSource = "SubscriptionProviderSource" + case tags = "Tags" + } + } + + public struct RegisterSubscriptionProviderResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered. + public let subscriptionProviderArn: String? + /// The Linux subscription provider that you registered. + public let subscriptionProviderSource: SubscriptionProviderSource? + /// Indicates the status of the registration action for the Linux subscription provider + /// that you requested. + public let subscriptionProviderStatus: SubscriptionProviderStatus? + + public init(subscriptionProviderArn: String? = nil, subscriptionProviderSource: SubscriptionProviderSource? = nil, subscriptionProviderStatus: SubscriptionProviderStatus? 
= nil) { + self.subscriptionProviderArn = subscriptionProviderArn + self.subscriptionProviderSource = subscriptionProviderSource + self.subscriptionProviderStatus = subscriptionProviderStatus + } + + private enum CodingKeys: String, CodingKey { + case subscriptionProviderArn = "SubscriptionProviderArn" + case subscriptionProviderSource = "SubscriptionProviderSource" + case subscriptionProviderStatus = "SubscriptionProviderStatus" + } + } + + public struct RegisteredSubscriptionProvider: AWSDecodableShape { + /// The timestamp from the last time that License Manager accessed third-party subscription data + /// for your account from your registered Linux subscription provider. + public let lastSuccessfulDataRetrievalTime: String? + /// The Amazon Resource Name (ARN) of the Secrets Manager secret that stores your registered Linux subscription provider + /// access token. For RHEL account subscriptions, this is the offline token. + public let secretArn: String? + /// The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered. + public let subscriptionProviderArn: String? + /// A supported third-party Linux subscription provider. License Manager currently supports + /// Red Hat subscriptions. + public let subscriptionProviderSource: SubscriptionProviderSource? + /// Indicates the status of your registered Linux subscription provider access token + /// from the last time License Manager retrieved subscription data. For RHEL account subscriptions, + /// this is the status of the offline token. + public let subscriptionProviderStatus: SubscriptionProviderStatus? + /// A detailed message that's associated with your BYOL subscription + /// provider token status. + public let subscriptionProviderStatusMessage: String? + + public init(lastSuccessfulDataRetrievalTime: String? = nil, secretArn: String? = nil, subscriptionProviderArn: String? = nil, subscriptionProviderSource: SubscriptionProviderSource? = nil, subscriptionProviderStatus: SubscriptionProviderStatus? = nil, subscriptionProviderStatusMessage: String? = nil) { + self.lastSuccessfulDataRetrievalTime = lastSuccessfulDataRetrievalTime + self.secretArn = secretArn + self.subscriptionProviderArn = subscriptionProviderArn + self.subscriptionProviderSource = subscriptionProviderSource + self.subscriptionProviderStatus = subscriptionProviderStatus + self.subscriptionProviderStatusMessage = subscriptionProviderStatusMessage + } + + private enum CodingKeys: String, CodingKey { + case lastSuccessfulDataRetrievalTime = "LastSuccessfulDataRetrievalTime" + case secretArn = "SecretArn" + case subscriptionProviderArn = "SubscriptionProviderArn" + case subscriptionProviderSource = "SubscriptionProviderSource" + case subscriptionProviderStatus = "SubscriptionProviderStatus" + case subscriptionProviderStatusMessage = "SubscriptionProviderStatusMessage" + } + } + public struct Subscription: AWSDecodableShape { /// The total amount of running instances using this subscription. public let instanceCount: Int64? @@ -305,6 +596,71 @@ extension LicenseManagerLinuxSubscriptions { } } + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Web Services resource to which to add the specified + /// metadata tags. + public let resourceArn: String + /// The metadata tags to assign to the Amazon Web Services resource. Tags are + /// formatted as key value pairs. 
+ public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$") + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Web Services resource to remove the metadata tags from. + public let resourceArn: String + /// A list of metadata tag keys to remove from the requested + /// resource. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$") + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateServiceSettingsRequest: AWSEncodableShape { /// Describes if updates are allowed to the service settings for Linux subscriptions. If you allow updates, you can aggregate Linux subscription data in more than one home Region. public let allowUpdate: Bool? @@ -366,6 +722,7 @@ extension LicenseManagerLinuxSubscriptions { public struct LicenseManagerLinuxSubscriptionsErrorType: AWSErrorType { enum Code: String { case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" case throttlingException = "ThrottlingException" case validationException = "ValidationException" } @@ -390,6 +747,8 @@ public struct LicenseManagerLinuxSubscriptionsErrorType: AWSErrorType { /// An exception occurred with the service. public static var internalServerException: Self { .init(.internalServerException) } + /// Unable to find the requested Amazon Web Services resource. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The request was denied due to request throttling. public static var throttlingException: Self { .init(.throttlingException) } /// The provided input is not valid. Try your request again. 
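// Usage sketch (editorial example): registering a Red Hat subscription provider
// and paging through registered providers with the new paginator added above.
// The secret ARN is a hypothetical placeholder; client setup and the assumption
// that the secret holds a Red Hat offline token follow the doc comments in this
// diff rather than anything verified here.
import SotoLicenseManagerLinuxSubscriptions

func registerAndListProviders(client: AWSClient) async throws {
    let subscriptions = LicenseManagerLinuxSubscriptions(client: client, region: .useast1)

    // RegisterSubscriptionProvider: the Secrets Manager secret is assumed to hold
    // the Red Hat offline token, per RegisterSubscriptionProviderRequest above.
    let secretArn = "arn:aws:secretsmanager:us-east-1:111122223333:secret:rhsm-offline-token-AbCdEf" // placeholder
    let registration = try await subscriptions.registerSubscriptionProvider(
        .init(secretArn: secretArn, subscriptionProviderSource: .redHat)
    )
    print("Registered:", registration.subscriptionProviderArn ?? "-",
          "status:", registration.subscriptionProviderStatus?.rawValue ?? "-")

    // ListRegisteredSubscriptionProviders: the paginator follows nextToken for us,
    // yielding one response page per iteration.
    for try await page in subscriptions.listRegisteredSubscriptionProvidersPaginator(.init()) {
        for provider in page.registeredSubscriptionProviders ?? [] {
            print(provider.subscriptionProviderSource?.rawValue ?? "-",
                  provider.subscriptionProviderStatus?.rawValue ?? "-")
        }
    }
}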
diff --git a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift index 6ed5c03c8f..3a698ec24b 100644 --- a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift +++ b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift @@ -161,6 +161,12 @@ extension MediaConnect { public var description: String { return self.rawValue } } + public enum OutputStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum PriceUnits: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case hourly = "HOURLY" public var description: String { return self.rawValue } @@ -697,6 +703,8 @@ extension MediaConnect { public let minLatency: Int? /// The name of the output. This value must be unique within the current flow. public let name: String? + /// An indication of whether the new output should be enabled or disabled as soon as it is created. If you don't specify the outputStatus field in your request, MediaConnect sets it to ENABLED. + public let outputStatus: OutputStatus? /// The port to use when content is distributed to this output. public let port: Int? /// The protocol to use for the output. @@ -712,7 +720,7 @@ extension MediaConnect { /// The name of the VPC interface attachment to use for this output. public let vpcInterfaceAttachment: VpcInterfaceAttachment? - public init(cidrAllowList: [String]? = nil, description: String? = nil, destination: String? = nil, encryption: Encryption? = nil, maxLatency: Int? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfigurationRequest]? = nil, minLatency: Int? = nil, name: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil, remoteId: String? = nil, senderControlPort: Int? = nil, smoothingLatency: Int? = nil, streamId: String? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { + public init(cidrAllowList: [String]? = nil, description: String? = nil, destination: String? = nil, encryption: Encryption? = nil, maxLatency: Int? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfigurationRequest]? = nil, minLatency: Int? = nil, name: String? = nil, outputStatus: OutputStatus? = nil, port: Int? = nil, protocol: `Protocol`? = nil, remoteId: String? = nil, senderControlPort: Int? = nil, smoothingLatency: Int? = nil, streamId: String? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { self.cidrAllowList = cidrAllowList self.description = description self.destination = destination @@ -721,6 +729,7 @@ extension MediaConnect { self.mediaStreamOutputConfigurations = mediaStreamOutputConfigurations self.minLatency = minLatency self.name = name + self.outputStatus = outputStatus self.port = port self.`protocol` = `protocol` self.remoteId = remoteId @@ -739,6 +748,7 @@ extension MediaConnect { case mediaStreamOutputConfigurations = "mediaStreamOutputConfigurations" case minLatency = "minLatency" case name = "name" + case outputStatus = "outputStatus" case port = "port" case `protocol` = "protocol" case remoteId = "remoteId" @@ -2801,6 +2811,8 @@ extension MediaConnect { public let name: String? /// The ARN of the output. public let outputArn: String? + /// An indication of whether the output is transmitting data or not. + public let outputStatus: OutputStatus? /// The port to use when content is distributed to this output. public let port: Int? 
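// Usage sketch (editorial example): adding a MediaConnect flow output that starts
// out disabled by setting the new outputStatus field. Only AddOutputRequest.outputStatus
// and the OutputStatus enum come from the change above; the AddFlowOutputs operation,
// the AddFlowOutputsRequest shape, the .rtp protocol case, and all address/name values
// are assumptions for illustration.
import SotoMediaConnect

func addStandbyOutput(client: AWSClient, flowArn: String) async throws {
    let mediaConnect = MediaConnect(client: client, region: .useast1)

    // outputStatus: .disabled keeps the new output from transmitting until it is
    // explicitly enabled later (for example via UpdateFlowOutput).
    let output = MediaConnect.AddOutputRequest(
        destination: "203.0.113.10",   // placeholder destination IP
        name: "standby-output",        // placeholder output name
        outputStatus: .disabled,
        port: 5000,
        protocol: .rtp
    )

    let response = try await mediaConnect.addFlowOutputs(
        .init(flowArn: flowArn, outputs: [output])
    )
    print("New output status:", response.outputs?.first?.outputStatus?.rawValue ?? "-")
}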
/// Attributes related to the transport stream that are used in the output. @@ -2808,7 +2820,7 @@ extension MediaConnect { /// The name of the VPC interface attachment to use for this output. public let vpcInterfaceAttachment: VpcInterfaceAttachment? - public init(bridgeArn: String? = nil, bridgePorts: [Int]? = nil, dataTransferSubscriberFeePercent: Int? = nil, description: String? = nil, destination: String? = nil, encryption: Encryption? = nil, entitlementArn: String? = nil, listenerAddress: String? = nil, mediaLiveInputArn: String? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfiguration]? = nil, name: String? = nil, outputArn: String? = nil, port: Int? = nil, transport: Transport? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { + public init(bridgeArn: String? = nil, bridgePorts: [Int]? = nil, dataTransferSubscriberFeePercent: Int? = nil, description: String? = nil, destination: String? = nil, encryption: Encryption? = nil, entitlementArn: String? = nil, listenerAddress: String? = nil, mediaLiveInputArn: String? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfiguration]? = nil, name: String? = nil, outputArn: String? = nil, outputStatus: OutputStatus? = nil, port: Int? = nil, transport: Transport? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { self.bridgeArn = bridgeArn self.bridgePorts = bridgePorts self.dataTransferSubscriberFeePercent = dataTransferSubscriberFeePercent @@ -2821,6 +2833,7 @@ extension MediaConnect { self.mediaStreamOutputConfigurations = mediaStreamOutputConfigurations self.name = name self.outputArn = outputArn + self.outputStatus = outputStatus self.port = port self.transport = transport self.vpcInterfaceAttachment = vpcInterfaceAttachment @@ -2839,6 +2852,7 @@ extension MediaConnect { case mediaStreamOutputConfigurations = "mediaStreamOutputConfigurations" case name = "name" case outputArn = "outputArn" + case outputStatus = "outputStatus" case port = "port" case transport = "transport" case vpcInterfaceAttachment = "vpcInterfaceAttachment" @@ -4133,6 +4147,8 @@ extension MediaConnect { public let minLatency: Int? /// The ARN of the output that you want to update. public let outputArn: String + /// An indication of whether the output should transmit data or not. If you don't specify the outputStatus field in your request, MediaConnect leaves the value unchanged. + public let outputStatus: OutputStatus? /// The port to use when content is distributed to this output. public let port: Int? /// The protocol to use for the output. @@ -4150,7 +4166,7 @@ extension MediaConnect { /// The name of the VPC interface attachment to use for this output. public let vpcInterfaceAttachment: VpcInterfaceAttachment? - public init(cidrAllowList: [String]? = nil, description: String? = nil, destination: String? = nil, encryption: UpdateEncryption? = nil, flowArn: String, maxLatency: Int? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfigurationRequest]? = nil, minLatency: Int? = nil, outputArn: String, port: Int? = nil, protocol: `Protocol`? = nil, remoteId: String? = nil, senderControlPort: Int? = nil, senderIpAddress: String? = nil, smoothingLatency: Int? = nil, streamId: String? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { + public init(cidrAllowList: [String]? = nil, description: String? = nil, destination: String? = nil, encryption: UpdateEncryption? = nil, flowArn: String, maxLatency: Int? = nil, mediaStreamOutputConfigurations: [MediaStreamOutputConfigurationRequest]? 
= nil, minLatency: Int? = nil, outputArn: String, outputStatus: OutputStatus? = nil, port: Int? = nil, protocol: `Protocol`? = nil, remoteId: String? = nil, senderControlPort: Int? = nil, senderIpAddress: String? = nil, smoothingLatency: Int? = nil, streamId: String? = nil, vpcInterfaceAttachment: VpcInterfaceAttachment? = nil) { self.cidrAllowList = cidrAllowList self.description = description self.destination = destination @@ -4160,6 +4176,7 @@ extension MediaConnect { self.mediaStreamOutputConfigurations = mediaStreamOutputConfigurations self.minLatency = minLatency self.outputArn = outputArn + self.outputStatus = outputStatus self.port = port self.`protocol` = `protocol` self.remoteId = remoteId @@ -4182,6 +4199,7 @@ extension MediaConnect { try container.encodeIfPresent(self.mediaStreamOutputConfigurations, forKey: .mediaStreamOutputConfigurations) try container.encodeIfPresent(self.minLatency, forKey: .minLatency) request.encodePath(self.outputArn, key: "OutputArn") + try container.encodeIfPresent(self.outputStatus, forKey: .outputStatus) try container.encodeIfPresent(self.port, forKey: .port) try container.encodeIfPresent(self.`protocol`, forKey: .`protocol`) try container.encodeIfPresent(self.remoteId, forKey: .remoteId) @@ -4200,6 +4218,7 @@ extension MediaConnect { case maxLatency = "maxLatency" case mediaStreamOutputConfigurations = "mediaStreamOutputConfigurations" case minLatency = "minLatency" + case outputStatus = "outputStatus" case port = "port" case `protocol` = "protocol" case remoteId = "remoteId" diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index 2144d8de03..739f64c068 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -136,6 +136,13 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum Algorithm: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case aes128 = "AES128" + case aes192 = "AES192" + case aes256 = "AES256" + public var description: String { return self.rawValue } + } + public enum AudioDescriptionAudioTypeControl: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case followInput = "FOLLOW_INPUT" case useConfigured = "USE_CONFIGURED" @@ -1400,6 +1407,7 @@ extension MediaLive { case rtmpPull = "RTMP_PULL" case rtmpPush = "RTMP_PUSH" case rtpPush = "RTP_PUSH" + case srtCaller = "SRT_CALLER" case tsFile = "TS_FILE" case udpPush = "UDP_PUSH" case urlPull = "URL_PULL" @@ -4381,12 +4389,14 @@ extension MediaLive { /// exactly two source URLs for redundancy. /// Only specify sources for PULL type Inputs. Leave Destinations empty. public let sources: [InputSourceRequest]? + /// The settings associated with an SRT input. + public let srtSettings: SrtSettingsRequest? /// A collection of key-value pairs. public let tags: [String: String]? public let type: InputType? public let vpc: InputVpcRequest? - public init(destinations: [InputDestinationRequest]? = nil, inputDevices: [InputDeviceSettings]? = nil, inputSecurityGroups: [String]? = nil, mediaConnectFlows: [MediaConnectFlowRequest]? = nil, name: String? = nil, requestId: String? = CreateInputRequest.idempotencyToken(), roleArn: String? = nil, sources: [InputSourceRequest]? = nil, tags: [String: String]? = nil, type: InputType? = nil, vpc: InputVpcRequest? = nil) { + public init(destinations: [InputDestinationRequest]? = nil, inputDevices: [InputDeviceSettings]? 
= nil, inputSecurityGroups: [String]? = nil, mediaConnectFlows: [MediaConnectFlowRequest]? = nil, name: String? = nil, requestId: String? = CreateInputRequest.idempotencyToken(), roleArn: String? = nil, sources: [InputSourceRequest]? = nil, srtSettings: SrtSettingsRequest? = nil, tags: [String: String]? = nil, type: InputType? = nil, vpc: InputVpcRequest? = nil) { self.destinations = destinations self.inputDevices = inputDevices self.inputSecurityGroups = inputSecurityGroups @@ -4395,6 +4405,7 @@ extension MediaLive { self.requestId = requestId self.roleArn = roleArn self.sources = sources + self.srtSettings = srtSettings self.tags = tags self.type = type self.vpc = vpc @@ -4409,6 +4420,7 @@ extension MediaLive { case requestId = "requestId" case roleArn = "roleArn" case sources = "sources" + case srtSettings = "srtSettings" case tags = "tags" case type = "type" case vpc = "vpc" @@ -5524,12 +5536,14 @@ extension MediaLive { public let securityGroups: [String]? /// A list of the sources of the input (PULL-type). public let sources: [InputSource]? + /// The settings associated with an SRT input. + public let srtSettings: SrtSettings? public let state: InputState? /// A collection of key-value pairs. public let tags: [String: String]? public let type: InputType? - public init(arn: String? = nil, attachedChannels: [String]? = nil, destinations: [InputDestination]? = nil, id: String? = nil, inputClass: InputClass? = nil, inputDevices: [InputDeviceSettings]? = nil, inputPartnerIds: [String]? = nil, inputSourceType: InputSourceType? = nil, mediaConnectFlows: [MediaConnectFlow]? = nil, name: String? = nil, roleArn: String? = nil, securityGroups: [String]? = nil, sources: [InputSource]? = nil, state: InputState? = nil, tags: [String: String]? = nil, type: InputType? = nil) { + public init(arn: String? = nil, attachedChannels: [String]? = nil, destinations: [InputDestination]? = nil, id: String? = nil, inputClass: InputClass? = nil, inputDevices: [InputDeviceSettings]? = nil, inputPartnerIds: [String]? = nil, inputSourceType: InputSourceType? = nil, mediaConnectFlows: [MediaConnectFlow]? = nil, name: String? = nil, roleArn: String? = nil, securityGroups: [String]? = nil, sources: [InputSource]? = nil, srtSettings: SrtSettings? = nil, state: InputState? = nil, tags: [String: String]? = nil, type: InputType? = nil) { self.arn = arn self.attachedChannels = attachedChannels self.destinations = destinations @@ -5543,6 +5557,7 @@ extension MediaLive { self.roleArn = roleArn self.securityGroups = securityGroups self.sources = sources + self.srtSettings = srtSettings self.state = state self.tags = tags self.type = type @@ -5562,6 +5577,7 @@ extension MediaLive { case roleArn = "roleArn" case securityGroups = "securityGroups" case sources = "sources" + case srtSettings = "srtSettings" case state = "state" case tags = "tags" case type = "type" @@ -6191,7 +6207,6 @@ extension MediaLive { public struct Eac3AtmosSettings: AWSEncodableShape & AWSDecodableShape { /// Average bitrate in bits/second. Valid bitrates depend on the coding mode. - /// // * @affectsRightSizing true public let bitrate: Double? /// Dolby Digital Plus with Dolby Atmos coding mode. Determines number of channels. public let codingMode: Eac3AtmosCodingMode? @@ -7316,7 +7331,12 @@ extension MediaLive { public let colorSpaceSettings: H264ColorSpaceSettings? /// Entropy encoding mode. Use cabac (must be in Main or High profile) or cavlc. public let entropyEncoding: H264EntropyEncoding? - /// Optional filters that you can apply to an encode. 
+ /// Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We + /// recommend that you try both filters and observe the results to decide which one to use. + /// The Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual + /// filtering and motion compensated temporal filtering (MCTF). It operates independently of the compression level. + /// The Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current + /// compression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower. public let filterSettings: H264FilterSettings? /// Four bit AFD value to write on all frames of video in the output stream. Only valid when afdSignaling is set to 'Fixed'. public let fixedAfd: FixedAfd? @@ -7582,7 +7602,12 @@ extension MediaLive { public let colorMetadata: H265ColorMetadata? /// Color Space settings public let colorSpaceSettings: H265ColorSpaceSettings? - /// Optional filters that you can apply to an encode. + /// Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We + /// recommend that you try both filters and observe the results to decide which one to use. + /// The Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual + /// filtering and motion compensated temporal filtering (MCTF). It operates independently of the compression level. + /// The Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current + /// compression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower. public let filterSettings: H265FilterSettings? /// Four bit AFD value to write on all frames of video in the output stream. Only valid when afdSignaling is set to 'Fixed'. public let fixedAfd: FixedAfd? @@ -8378,12 +8403,14 @@ extension MediaLive { public let securityGroups: [String]? /// A list of the sources of the input (PULL-type). public let sources: [InputSource]? + /// The settings associated with an SRT input. + public let srtSettings: SrtSettings? public let state: InputState? /// A collection of key-value pairs. public let tags: [String: String]? public let type: InputType? - public init(arn: String? = nil, attachedChannels: [String]? = nil, destinations: [InputDestination]? = nil, id: String? = nil, inputClass: InputClass? = nil, inputDevices: [InputDeviceSettings]? = nil, inputPartnerIds: [String]? = nil, inputSourceType: InputSourceType? = nil, mediaConnectFlows: [MediaConnectFlow]? = nil, name: String? = nil, roleArn: String? = nil, securityGroups: [String]? = nil, sources: [InputSource]? = nil, state: InputState? = nil, tags: [String: String]? = nil, type: InputType? = nil) { + public init(arn: String? = nil, attachedChannels: [String]? = nil, destinations: [InputDestination]? = nil, id: String? = nil, inputClass: InputClass? = nil, inputDevices: [InputDeviceSettings]? = nil, inputPartnerIds: [String]? = nil, inputSourceType: InputSourceType? = nil, mediaConnectFlows: [MediaConnectFlow]? = nil, name: String? = nil, roleArn: String? = nil, securityGroups: [String]? = nil, sources: [InputSource]? = nil, srtSettings: SrtSettings? = nil, state: InputState? = nil, tags: [String: String]? = nil, type: InputType? 
= nil) { self.arn = arn self.attachedChannels = attachedChannels self.destinations = destinations @@ -8397,6 +8424,7 @@ extension MediaLive { self.roleArn = roleArn self.securityGroups = securityGroups self.sources = sources + self.srtSettings = srtSettings self.state = state self.tags = tags self.type = type @@ -8416,6 +8444,7 @@ extension MediaLive { case roleArn = "roleArn" case securityGroups = "securityGroups" case sources = "sources" + case srtSettings = "srtSettings" case state = "state" case tags = "tags" case type = "type" @@ -12681,6 +12710,120 @@ extension MediaLive { public init() {} } + public struct SrtCallerDecryption: AWSDecodableShape { + /// The algorithm used to encrypt content. + public let algorithm: Algorithm? + /// The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. The secret holds the passphrase that MediaLive uses to decrypt the source content. + public let passphraseSecretArn: String? + + public init(algorithm: Algorithm? = nil, passphraseSecretArn: String? = nil) { + self.algorithm = algorithm + self.passphraseSecretArn = passphraseSecretArn + } + + private enum CodingKeys: String, CodingKey { + case algorithm = "algorithm" + case passphraseSecretArn = "passphraseSecretArn" + } + } + + public struct SrtCallerDecryptionRequest: AWSEncodableShape { + /// The algorithm used to encrypt content. + public let algorithm: Algorithm? + /// The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. This secret holds the passphrase that MediaLive will use to decrypt the source content. + public let passphraseSecretArn: String? + + public init(algorithm: Algorithm? = nil, passphraseSecretArn: String? = nil) { + self.algorithm = algorithm + self.passphraseSecretArn = passphraseSecretArn + } + + private enum CodingKeys: String, CodingKey { + case algorithm = "algorithm" + case passphraseSecretArn = "passphraseSecretArn" + } + } + + public struct SrtCallerSource: AWSDecodableShape { + public let decryption: SrtCallerDecryption? + /// The preferred latency (in milliseconds) for implementing packet loss and recovery. Packet recovery is a key feature of SRT. + public let minimumLatency: Int? + /// The IP address at the upstream system (the listener) that MediaLive (the caller) connects to. + public let srtListenerAddress: String? + /// The port at the upstream system (the listener) that MediaLive (the caller) connects to. + public let srtListenerPort: String? + /// The stream ID, if the upstream system uses this identifier. + public let streamId: String? + + public init(decryption: SrtCallerDecryption? = nil, minimumLatency: Int? = nil, srtListenerAddress: String? = nil, srtListenerPort: String? = nil, streamId: String? = nil) { + self.decryption = decryption + self.minimumLatency = minimumLatency + self.srtListenerAddress = srtListenerAddress + self.srtListenerPort = srtListenerPort + self.streamId = streamId + } + + private enum CodingKeys: String, CodingKey { + case decryption = "decryption" + case minimumLatency = "minimumLatency" + case srtListenerAddress = "srtListenerAddress" + case srtListenerPort = "srtListenerPort" + case streamId = "streamId" + } + } + + public struct SrtCallerSourceRequest: AWSEncodableShape { + public let decryption: SrtCallerDecryptionRequest? + /// The preferred latency (in milliseconds) for implementing packet loss and recovery. Packet recovery is a key feature of SRT. 
Obtain this value from the operator at the upstream system. + public let minimumLatency: Int? + /// The IP address at the upstream system (the listener) that MediaLive (the caller) will connect to. + public let srtListenerAddress: String? + /// The port at the upstream system (the listener) that MediaLive (the caller) will connect to. + public let srtListenerPort: String? + /// This value is required if the upstream system uses this identifier because without it, the SRT handshake between MediaLive (the caller) and the upstream system (the listener) might fail. + public let streamId: String? + + public init(decryption: SrtCallerDecryptionRequest? = nil, minimumLatency: Int? = nil, srtListenerAddress: String? = nil, srtListenerPort: String? = nil, streamId: String? = nil) { + self.decryption = decryption + self.minimumLatency = minimumLatency + self.srtListenerAddress = srtListenerAddress + self.srtListenerPort = srtListenerPort + self.streamId = streamId + } + + private enum CodingKeys: String, CodingKey { + case decryption = "decryption" + case minimumLatency = "minimumLatency" + case srtListenerAddress = "srtListenerAddress" + case srtListenerPort = "srtListenerPort" + case streamId = "streamId" + } + } + + public struct SrtSettings: AWSDecodableShape { + public let srtCallerSources: [SrtCallerSource]? + + public init(srtCallerSources: [SrtCallerSource]? = nil) { + self.srtCallerSources = srtCallerSources + } + + private enum CodingKeys: String, CodingKey { + case srtCallerSources = "srtCallerSources" + } + } + + public struct SrtSettingsRequest: AWSEncodableShape { + public let srtCallerSources: [SrtCallerSourceRequest]? + + public init(srtCallerSources: [SrtCallerSourceRequest]? = nil) { + self.srtCallerSources = srtCallerSources + } + + private enum CodingKeys: String, CodingKey { + case srtCallerSources = "srtCallerSources" + } + } + public struct StandardHlsSettings: AWSEncodableShape & AWSDecodableShape { /// List all the audio groups that are used with the video output stream. Input all the audio GROUP-IDs that are associated to the video, separate by ','. public let audioRenditionSets: String? @@ -14625,8 +14768,10 @@ extension MediaLive { /// exactly two source URLs for redundancy. /// Only specify sources for PULL type Inputs. Leave Destinations empty. public let sources: [InputSourceRequest]? + /// The settings associated with an SRT input. + public let srtSettings: SrtSettingsRequest? - public init(destinations: [InputDestinationRequest]? = nil, inputDevices: [InputDeviceRequest]? = nil, inputId: String, inputSecurityGroups: [String]? = nil, mediaConnectFlows: [MediaConnectFlowRequest]? = nil, name: String? = nil, roleArn: String? = nil, sources: [InputSourceRequest]? = nil) { + public init(destinations: [InputDestinationRequest]? = nil, inputDevices: [InputDeviceRequest]? = nil, inputId: String, inputSecurityGroups: [String]? = nil, mediaConnectFlows: [MediaConnectFlowRequest]? = nil, name: String? = nil, roleArn: String? = nil, sources: [InputSourceRequest]? = nil, srtSettings: SrtSettingsRequest? 
= nil) { self.destinations = destinations self.inputDevices = inputDevices self.inputId = inputId @@ -14635,6 +14780,7 @@ extension MediaLive { self.name = name self.roleArn = roleArn self.sources = sources + self.srtSettings = srtSettings } public func encode(to encoder: Encoder) throws { @@ -14648,6 +14794,7 @@ extension MediaLive { try container.encodeIfPresent(self.name, forKey: .name) try container.encodeIfPresent(self.roleArn, forKey: .roleArn) try container.encodeIfPresent(self.sources, forKey: .sources) + try container.encodeIfPresent(self.srtSettings, forKey: .srtSettings) } private enum CodingKeys: String, CodingKey { @@ -14658,6 +14805,7 @@ extension MediaLive { case name = "name" case roleArn = "roleArn" case sources = "sources" + case srtSettings = "srtSettings" } } diff --git a/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift b/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift index 5fd5c4b411..e7b2bfc17f 100644 --- a/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift +++ b/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift @@ -217,6 +217,23 @@ extension OpenSearch { public var description: String { return self.rawValue } } + public enum NaturalLanguageQueryGenerationCurrentState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disableComplete = "DISABLE_COMPLETE" + case disableFailed = "DISABLE_FAILED" + case disableInProgress = "DISABLE_IN_PROGRESS" + case enableComplete = "ENABLE_COMPLETE" + case enableFailed = "ENABLE_FAILED" + case enableInProgress = "ENABLE_IN_PROGRESS" + case notEnabled = "NOT_ENABLED" + public var description: String { return self.rawValue } + } + + public enum NaturalLanguageQueryGenerationDesiredState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum NodeStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "Active" case notAvailable = "NotAvailable" @@ -510,6 +527,48 @@ extension OpenSearch { // MARK: Shapes + public struct AIMLOptionsInput: AWSEncodableShape { + /// Container for parameters required for natural language query generation on the specified domain. + public let naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsInput? + + public init(naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsInput? = nil) { + self.naturalLanguageQueryGenerationOptions = naturalLanguageQueryGenerationOptions + } + + private enum CodingKeys: String, CodingKey { + case naturalLanguageQueryGenerationOptions = "NaturalLanguageQueryGenerationOptions" + } + } + + public struct AIMLOptionsOutput: AWSDecodableShape { + /// Container for parameters required for natural language query generation on the specified domain. + public let naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsOutput? + + public init(naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsOutput? = nil) { + self.naturalLanguageQueryGenerationOptions = naturalLanguageQueryGenerationOptions + } + + private enum CodingKeys: String, CodingKey { + case naturalLanguageQueryGenerationOptions = "NaturalLanguageQueryGenerationOptions" + } + } + + public struct AIMLOptionsStatus: AWSDecodableShape { + /// Machine learning options on the specified domain. + public let options: AIMLOptionsOutput? + public let status: OptionStatus? 
+ + public init(options: AIMLOptionsOutput? = nil, status: OptionStatus? = nil) { + self.options = options + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case options = "Options" + case status = "Status" + } + } + public struct AWSDomainInformation: AWSEncodableShape & AWSDecodableShape { /// Name of the domain. public let domainName: String @@ -1518,6 +1577,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Options for fine-grained access control. public let advancedSecurityOptions: AdvancedSecurityOptionsInput? + /// Options for all machine learning features for the specified domain. + public let aimlOptions: AIMLOptionsInput? /// Options for Auto-Tune. public let autoTuneOptions: AutoTuneOptionsInput? /// Container for the cluster configuration of a domain. @@ -1551,10 +1612,11 @@ extension OpenSearch { /// Container for the values required to configure VPC access domains. If you don't specify these values, OpenSearch Service creates the domain with a public endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC. public let vpcOptions: VPCOptions? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, autoTuneOptions: AutoTuneOptionsInput? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, tagList: [Tag]? = nil, vpcOptions: VPCOptions? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, aimlOptions: AIMLOptionsInput? = nil, autoTuneOptions: AutoTuneOptionsInput? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, tagList: [Tag]? = nil, vpcOptions: VPCOptions? 
= nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.clusterConfig = clusterConfig self.cognitoOptions = cognitoOptions @@ -1600,6 +1662,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case clusterConfig = "ClusterConfig" case cognitoOptions = "CognitoOptions" @@ -2820,6 +2883,8 @@ extension OpenSearch { public let advancedOptions: AdvancedOptionsStatus? /// Container for fine-grained access control settings for the domain. public let advancedSecurityOptions: AdvancedSecurityOptionsStatus? + /// Container for parameters required to enable all machine learning features. + public let aimlOptions: AIMLOptionsStatus? /// Container for Auto-Tune settings for the domain. public let autoTuneOptions: AutoTuneOptionsStatus? /// Container for information about the progress of an existing configuration change. @@ -2853,10 +2918,11 @@ extension OpenSearch { /// The current VPC options for the domain and the status of any updates to their configuration. public let vpcOptions: VPCDerivedInfoStatus? - public init(accessPolicies: AccessPoliciesStatus? = nil, advancedOptions: AdvancedOptionsStatus? = nil, advancedSecurityOptions: AdvancedSecurityOptionsStatus? = nil, autoTuneOptions: AutoTuneOptionsStatus? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfigStatus? = nil, cognitoOptions: CognitoOptionsStatus? = nil, domainEndpointOptions: DomainEndpointOptionsStatus? = nil, ebsOptions: EBSOptionsStatus? = nil, encryptionAtRestOptions: EncryptionAtRestOptionsStatus? = nil, engineVersion: VersionStatus? = nil, ipAddressType: IPAddressTypeStatus? = nil, logPublishingOptions: LogPublishingOptionsStatus? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptionsStatus? = nil, offPeakWindowOptions: OffPeakWindowOptionsStatus? = nil, snapshotOptions: SnapshotOptionsStatus? = nil, softwareUpdateOptions: SoftwareUpdateOptionsStatus? = nil, vpcOptions: VPCDerivedInfoStatus? = nil) { + public init(accessPolicies: AccessPoliciesStatus? = nil, advancedOptions: AdvancedOptionsStatus? = nil, advancedSecurityOptions: AdvancedSecurityOptionsStatus? = nil, aimlOptions: AIMLOptionsStatus? = nil, autoTuneOptions: AutoTuneOptionsStatus? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfigStatus? = nil, cognitoOptions: CognitoOptionsStatus? = nil, domainEndpointOptions: DomainEndpointOptionsStatus? = nil, ebsOptions: EBSOptionsStatus? = nil, encryptionAtRestOptions: EncryptionAtRestOptionsStatus? = nil, engineVersion: VersionStatus? = nil, ipAddressType: IPAddressTypeStatus? = nil, logPublishingOptions: LogPublishingOptionsStatus? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptionsStatus? = nil, offPeakWindowOptions: OffPeakWindowOptionsStatus? = nil, snapshotOptions: SnapshotOptionsStatus? = nil, softwareUpdateOptions: SoftwareUpdateOptionsStatus? = nil, vpcOptions: VPCDerivedInfoStatus? 
= nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.changeProgressDetails = changeProgressDetails self.clusterConfig = clusterConfig @@ -2879,6 +2945,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case changeProgressDetails = "ChangeProgressDetails" case clusterConfig = "ClusterConfig" @@ -3121,6 +3188,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Settings for fine-grained access control. public let advancedSecurityOptions: AdvancedSecurityOptions? + /// Container for parameters required to enable all machine learning features. + public let aimlOptions: AIMLOptionsOutput? /// The Amazon Resource Name (ARN) of the domain. For more information, see IAM identifiers in the AWS Identity and Access Management User Guide. public let arn: String /// Auto-Tune settings for the domain. @@ -3180,10 +3249,11 @@ extension OpenSearch { /// The VPC configuration for the domain. public let vpcOptions: VPCDerivedInfo? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptions? = nil, arn: String, autoTuneOptions: AutoTuneOptionsOutput? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfig, cognitoOptions: CognitoOptions? = nil, created: Bool? = nil, deleted: Bool? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainEndpointV2HostedZoneId: String? = nil, domainId: String, domainName: String, domainProcessingStatus: DomainProcessingStatusType? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, endpoint: String? = nil, endpoints: [String: String]? = nil, endpointV2: String? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, processing: Bool? = nil, serviceSoftwareOptions: ServiceSoftwareOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, upgradeProcessing: Bool? = nil, vpcOptions: VPCDerivedInfo? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptions? = nil, aimlOptions: AIMLOptionsOutput? = nil, arn: String, autoTuneOptions: AutoTuneOptionsOutput? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfig, cognitoOptions: CognitoOptions? = nil, created: Bool? = nil, deleted: Bool? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainEndpointV2HostedZoneId: String? = nil, domainId: String, domainName: String, domainProcessingStatus: DomainProcessingStatusType? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, endpoint: String? = nil, endpoints: [String: String]? = nil, endpointV2: String? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, modifyingProperties: [ModifyingProperties]? 
= nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, processing: Bool? = nil, serviceSoftwareOptions: ServiceSoftwareOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, upgradeProcessing: Bool? = nil, vpcOptions: VPCDerivedInfo? = nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.arn = arn self.autoTuneOptions = autoTuneOptions self.changeProgressDetails = changeProgressDetails @@ -3219,6 +3289,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case arn = "ARN" case autoTuneOptions = "AutoTuneOptions" case changeProgressDetails = "ChangeProgressDetails" @@ -4606,6 +4677,36 @@ extension OpenSearch { } } + public struct NaturalLanguageQueryGenerationOptionsInput: AWSEncodableShape { + /// The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED. + public let desiredState: NaturalLanguageQueryGenerationDesiredState? + + public init(desiredState: NaturalLanguageQueryGenerationDesiredState? = nil) { + self.desiredState = desiredState + } + + private enum CodingKeys: String, CodingKey { + case desiredState = "DesiredState" + } + } + + public struct NaturalLanguageQueryGenerationOptionsOutput: AWSDecodableShape { + /// The current state of the natural language query generation feature, indicating completion, in progress, or failure. + public let currentState: NaturalLanguageQueryGenerationCurrentState? + /// The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED. + public let desiredState: NaturalLanguageQueryGenerationDesiredState? + + public init(currentState: NaturalLanguageQueryGenerationCurrentState? = nil, desiredState: NaturalLanguageQueryGenerationDesiredState? = nil) { + self.currentState = currentState + self.desiredState = desiredState + } + + private enum CodingKeys: String, CodingKey { + case currentState = "CurrentState" + case desiredState = "DesiredState" + } + } + public struct NodeToNodeEncryptionOptions: AWSEncodableShape & AWSDecodableShape { /// True to enable node-to-node encryption. public let enabled: Bool? @@ -5679,6 +5780,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Options for fine-grained access control. public let advancedSecurityOptions: AdvancedSecurityOptionsInput? + /// Options for all machine learning features for the specified domain. + public let aimlOptions: AIMLOptionsInput? /// Options for Auto-Tune. public let autoTuneOptions: AutoTuneOptions? /// Changes that you want to make to the cluster configuration, such as the instance type and number of EC2 instances. @@ -5712,10 +5815,11 @@ extension OpenSearch { /// Options to specify the subnets and security groups for a VPC endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC. public let vpcOptions: VPCOptions? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, autoTuneOptions: AutoTuneOptions? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? 
= nil, domainName: String, dryRun: Bool? = nil, dryRunMode: DryRunMode? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, vpcOptions: VPCOptions? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, aimlOptions: AIMLOptionsInput? = nil, autoTuneOptions: AutoTuneOptions? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, dryRun: Bool? = nil, dryRunMode: DryRunMode? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, vpcOptions: VPCOptions? = nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.clusterConfig = clusterConfig self.cognitoOptions = cognitoOptions @@ -5740,6 +5844,7 @@ extension OpenSearch { try container.encodeIfPresent(self.accessPolicies, forKey: .accessPolicies) try container.encodeIfPresent(self.advancedOptions, forKey: .advancedOptions) try container.encodeIfPresent(self.advancedSecurityOptions, forKey: .advancedSecurityOptions) + try container.encodeIfPresent(self.aimlOptions, forKey: .aimlOptions) try container.encodeIfPresent(self.autoTuneOptions, forKey: .autoTuneOptions) try container.encodeIfPresent(self.clusterConfig, forKey: .clusterConfig) try container.encodeIfPresent(self.cognitoOptions, forKey: .cognitoOptions) @@ -5779,6 +5884,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case clusterConfig = "ClusterConfig" case cognitoOptions = "CognitoOptions" diff --git a/Sources/Soto/Services/PI/PI_api.swift b/Sources/Soto/Services/PI/PI_api.swift index 1be8054220..9a3df7478b 100644 --- a/Sources/Soto/Services/PI/PI_api.swift +++ b/Sources/Soto/Services/PI/PI_api.swift @@ -60,6 +60,7 @@ public struct PI: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2018-02-27", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: PIErrorType.self, xmlNamespace: "http://pi.amazonaws.com/doc/2018-02-27/", middleware: middleware, @@ -72,6 +73,64 @@ public struct PI: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "pi.af-south-1.api.aws", + "ap-east-1": "pi.ap-east-1.api.aws", + "ap-northeast-1": "pi.ap-northeast-1.api.aws", + "ap-northeast-2": "pi.ap-northeast-2.api.aws", + "ap-northeast-3": "pi.ap-northeast-3.api.aws", + "ap-south-1": "pi.ap-south-1.api.aws", + 
"ap-south-2": "pi.ap-south-2.api.aws", + "ap-southeast-1": "pi.ap-southeast-1.api.aws", + "ap-southeast-2": "pi.ap-southeast-2.api.aws", + "ap-southeast-3": "pi.ap-southeast-3.api.aws", + "ap-southeast-4": "pi.ap-southeast-4.api.aws", + "ca-central-1": "pi.ca-central-1.api.aws", + "ca-west-1": "pi.ca-west-1.api.aws", + "cn-north-1": "pi.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "pi.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "pi.eu-central-1.api.aws", + "eu-central-2": "pi.eu-central-2.api.aws", + "eu-north-1": "pi.eu-north-1.api.aws", + "eu-south-1": "pi.eu-south-1.api.aws", + "eu-south-2": "pi.eu-south-2.api.aws", + "eu-west-1": "pi.eu-west-1.api.aws", + "eu-west-2": "pi.eu-west-2.api.aws", + "eu-west-3": "pi.eu-west-3.api.aws", + "il-central-1": "pi.il-central-1.api.aws", + "me-central-1": "pi.me-central-1.api.aws", + "me-south-1": "pi.me-south-1.api.aws", + "sa-east-1": "pi.sa-east-1.api.aws", + "us-east-1": "pi.us-east-1.api.aws", + "us-east-2": "pi.us-east-2.api.aws", + "us-gov-east-1": "pi.us-gov-east-1.api.aws", + "us-gov-west-1": "pi.us-gov-west-1.api.aws", + "us-west-1": "pi.us-west-1.api.aws", + "us-west-2": "pi.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "pi-fips.ca-central-1.api.aws", + "ca-west-1": "pi-fips.ca-west-1.api.aws", + "us-east-1": "pi-fips.us-east-1.api.aws", + "us-east-2": "pi-fips.us-east-2.api.aws", + "us-gov-east-1": "pi-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "pi-fips.us-gov-west-1.api.aws", + "us-west-1": "pi-fips.us-west-1.api.aws", + "us-west-2": "pi-fips.us-west-2.api.aws" + ]), + [.fips]: .init(endpoints: [ + "ca-central-1": "pi-fips.ca-central-1.amazonaws.com", + "ca-west-1": "pi-fips.ca-west-1.amazonaws.com", + "us-east-1": "pi-fips.us-east-1.amazonaws.com", + "us-east-2": "pi-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "pi-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "pi-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "pi-fips.us-west-1.amazonaws.com", + "us-west-2": "pi-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/QApps/QApps_api.swift b/Sources/Soto/Services/QApps/QApps_api.swift new file mode 100644 index 0000000000..bfab65593a --- /dev/null +++ b/Sources/Soto/Services/QApps/QApps_api.swift @@ -0,0 +1,448 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS QApps service. +/// +/// The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create an Q Appthat exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for marketing content-generation like writing customer emails and creating promotional content using a certain style of voice, tone, and branding. 
For more information, see Amazon Q App in the Amazon Q Business User Guide. +public struct QApps: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the QApps client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "QApps", + serviceIdentifier: "data.qapps", + signingName: "qapps", + serviceProtocol: .restjson, + apiVersion: "2023-11-27", + endpoint: endpoint, + errorType: QAppsErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Associates a rating or review for a library item with the user submitting the request. This increments the rating count for the specified library item. + @Sendable + public func associateLibraryItemReview(_ input: AssociateLibraryItemReviewInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "AssociateLibraryItemReview", + path: "/catalog.associateItemRating", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// This operation creates a link between the user's identity calling the operation and a specific Q App. This is useful to mark the Q App as a favorite for the user if the user doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q Apps. + @Sendable + public func associateQAppWithUser(_ input: AssociateQAppWithUserInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "AssociateQAppWithUser", + path: "/apps.install", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a new library item for an Amazon Q App, allowing it to be discovered and used by other allowed users. + @Sendable + public func createLibraryItem(_ input: CreateLibraryItemInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateLibraryItemOutput { + return try await self.client.execute( + operation: "CreateLibraryItem", + path: "/catalog.createItem", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a new Amazon Q App based on the provided definition. The Q App definition specifies the cards and flow of the Q App. 
This operation also calculates the dependencies between the cards by inspecting the references in the prompts. + @Sendable + public func createQApp(_ input: CreateQAppInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateQAppOutput { + return try await self.client.execute( + operation: "CreateQApp", + path: "/apps.create", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a library item for an Amazon Q App, removing it from the library so it can no longer be discovered or used by other users. + @Sendable + public func deleteLibraryItem(_ input: DeleteLibraryItemInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteLibraryItem", + path: "/catalog.deleteItem", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an Amazon Q App owned by the user. If the Q App was previously published to the library, it is also removed from the library. + @Sendable + public func deleteQApp(_ input: DeleteQAppInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteQApp", + path: "/apps.delete", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes a rating or review previously submitted by the user for a library item. + @Sendable + public func disassociateLibraryItemReview(_ input: DisassociateLibraryItemReviewInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DisassociateLibraryItemReview", + path: "/catalog.disassociateItemRating", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Disassociates a Q App from a user, removing the user's access to run the Q App. + @Sendable + public func disassociateQAppFromUser(_ input: DisassociateQAppFromUserInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DisassociateQAppFromUser", + path: "/apps.uninstall", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves details about a library item for an Amazon Q App, including its metadata, categories, ratings, and usage statistics. + @Sendable + public func getLibraryItem(_ input: GetLibraryItemInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetLibraryItemOutput { + return try await self.client.execute( + operation: "GetLibraryItem", + path: "/catalog.getItem", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the full details of a Q App, including its definition specifying the cards and flow. + @Sendable + public func getQApp(_ input: GetQAppInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetQAppOutput { + return try await self.client.execute( + operation: "GetQApp", + path: "/apps.get", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + }
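// A hypothetical usage sketch, not part of the generated service file: exercising the
// library-review and install calls defined above. The instance ID and UUIDs are
// placeholders, and `qapps` is assumed to be a QApps service object created with the
// initializer at the top of this file. Only initializers visible in this diff are used.
import SotoQApps

func upvoteAndInstallSharedApp(qapps: QApps) async throws {
    // Record a rating/review association for a published library item.
    try await qapps.associateLibraryItemReview(QApps.AssociateLibraryItemReviewInput(
        instanceId: "my-qbusiness-instance-id",               // placeholder
        libraryItemId: "11111111-1111-4111-8111-111111111111" // placeholder UUID
    ))
    // Link the underlying Q App to the calling user so it appears in their inventory.
    try await qapps.associateQAppWithUser(QApps.AssociateQAppWithUserInput(
        appId: "22222222-2222-4222-8222-222222222222",        // placeholder UUID
        instanceId: "my-qbusiness-instance-id"
    ))
}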
+ + /// Retrieves the current state and results for an active session of an Amazon Q App. + @Sendable + public func getQAppSession(_ input: GetQAppSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetQAppSessionOutput { + return try await self.client.execute( + operation: "GetQAppSession", + path: "/runtime.getQAppSession", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Uploads a file that can then be used either as a default in a FileUploadCard from a Q App definition or as a file that is used inside a single Q App run. The purpose of the document is determined by a scope parameter that indicates whether it is at the app definition level or at the app session level. + @Sendable + public func importDocument(_ input: ImportDocumentInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportDocumentOutput { + return try await self.client.execute( + operation: "ImportDocument", + path: "/apps.importDocument", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the library items for Amazon Q Apps that are published and available for users in your Amazon Web Services account. + @Sendable + public func listLibraryItems(_ input: ListLibraryItemsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListLibraryItemsOutput { + return try await self.client.execute( + operation: "ListLibraryItems", + path: "/catalog.list", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the Amazon Q Apps owned by or associated with the user, either because they created it or because they used it from the library in the past. The user identity is extracted from the credentials used to invoke this operation. + @Sendable + public func listQApps(_ input: ListQAppsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListQAppsOutput { + return try await self.client.execute( + operation: "ListQApps", + path: "/apps.list", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the tags associated with an Amazon Q Apps resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{resourceARN}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Generates an Amazon Q App definition based on either a conversation or a problem statement provided as input. The resulting app definition can be used to call CreateQApp. This API doesn't create Amazon Q Apps directly. + @Sendable + public func predictQApp(_ input: PredictQAppInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PredictQAppOutput { + return try await self.client.execute( + operation: "PredictQApp", + path: "/apps.predictQApp", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + }
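// A hypothetical sketch, not part of the generated service file: publishing an existing
// Q App to the shared library with createLibraryItem. All identifiers are placeholders;
// the category ID must be an existing category UUID in the target Amazon Q Business
// instance. Only initializers and fields visible in this diff are used.
import SotoQApps

func publishToLibrary(qapps: QApps, appId: String, appVersion: Int) async throws -> String {
    let output = try await qapps.createLibraryItem(QApps.CreateLibraryItemInput(
        appId: appId,
        appVersion: appVersion,
        categories: ["33333333-3333-4333-8333-333333333333"], // placeholder category UUID
        instanceId: "my-qbusiness-instance-id"                 // placeholder
    ))
    // CreateLibraryItemOutput also carries audit fields (createdAt, createdBy, status).
    return output.libraryItemId
}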
+ + /// Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be run. Each Q App session will be condensed into a single conversation in the web experience. + @Sendable + public func startQAppSession(_ input: StartQAppSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartQAppSessionOutput { + return try await self.client.execute( + operation: "StartQAppSession", + path: "/runtime.startQAppSession", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Stops an active session for an Amazon Q App. This deletes all data related to the session and makes it invalid for future uses. The results of the session will be persisted as part of the conversation. + @Sendable + public func stopQAppSession(_ input: StopQAppSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "StopQAppSession", + path: "/runtime.deleteMiniAppRun", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Associates tags with an Amazon Q Apps resource. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/tags/{resourceARN}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Disassociates tags from an Amazon Q Apps resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/tags/{resourceARN}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates the metadata and status of a library item for an Amazon Q App. + @Sendable + public func updateLibraryItem(_ input: UpdateLibraryItemInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLibraryItemOutput { + return try await self.client.execute( + operation: "UpdateLibraryItem", + path: "/catalog.updateItem", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates an existing Amazon Q App, allowing modifications to its title, description, and definition. + @Sendable + public func updateQApp(_ input: UpdateQAppInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateQAppOutput { + return try await self.client.execute( + operation: "UpdateQApp", + path: "/apps.update", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates the session for a given Q App sessionId. This is only valid when at least one card of the session is in the WAITING state. Data for each WAITING card can be provided as input. If inputs are not provided, the call will be accepted but the session will not move forward. Inputs for cards that are not in the WAITING status will be ignored. + @Sendable + public func updateQAppSession(_ input: UpdateQAppSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateQAppSessionOutput { + return try await self.client.execute( + operation: "UpdateQAppSession", + path: "/runtime.updateQAppSession", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +}
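// A hypothetical end-to-end sketch, not part of the generated service file: creating a
// draft Q App with the service object defined above. The AWSClient is assumed to be
// configured elsewhere; the region, instance ID, title, and description are placeholders.
// A real definition needs at least one CardInput case (for example .textInput) built from
// the concrete card shapes in QApps_shapes.swift; an empty card list is used here purely
// to keep the sketch self-contained.
import SotoQApps

func createDraftQApp(client: AWSClient) async throws -> String {
    let qapps = QApps(client: client, region: .useast1)
    let definition = QApps.AppDefinitionInput(
        cards: [],                                       // placeholder; supply real cards
        initialPrompt: "Draft a short product announcement"
    )
    let output = try await qapps.createQApp(QApps.CreateQAppInput(
        appDefinition: definition,
        description: "Generates short product announcements",
        instanceId: "my-qbusiness-instance-id",          // placeholder
        title: "Announcement writer"
    ))
    return output.appId
}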
+ +extension QApps { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are no public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: QApps, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension QApps { + /// Lists the library items for Amazon Q Apps that are published and available for users in your Amazon Web Services account. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listLibraryItemsPaginator( + _ input: ListLibraryItemsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListLibraryItemsInput, ListLibraryItemsOutput> { + return .init( + input: input, + command: self.listLibraryItems, + inputKey: \ListLibraryItemsInput.nextToken, + outputKey: \ListLibraryItemsOutput.nextToken, + logger: logger + ) + } + + /// Lists the Amazon Q Apps owned by or associated with the user, either because they created it or because they used it from the library in the past. The user identity is extracted from the credentials used to invoke this operation. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listQAppsPaginator( + _ input: ListQAppsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListQAppsInput, ListQAppsOutput> { + return .init( + input: input, + command: self.listQApps, + inputKey: \ListQAppsInput.nextToken, + outputKey: \ListQAppsOutput.nextToken, + logger: logger + ) + } +} + +extension QApps.ListLibraryItemsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> QApps.ListLibraryItemsInput { + return .init( + categoryId: self.categoryId, + instanceId: self.instanceId, + limit: self.limit, + nextToken: token + ) + } +} + +extension QApps.ListQAppsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> QApps.ListQAppsInput { + return .init( + instanceId: self.instanceId, + limit: self.limit, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/QApps/QApps_shapes.swift b/Sources/Soto/Services/QApps/QApps_shapes.swift new file mode 100644 index 0000000000..882b2ca928 --- /dev/null +++ b/Sources/Soto/Services/QApps/QApps_shapes.swift @@ -0,0 +1,2227 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT.
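// A hypothetical sketch, not part of the generated shapes file: driving the ListQApps
// paginator declared in QApps_api.swift above. The instance ID is a placeholder; the
// paginator is an AsyncSequence whose elements are ListQAppsOutput pages.
import SotoQApps

func printQAppPages(qapps: QApps) async throws {
    let request = QApps.ListQAppsInput(
        instanceId: "my-qbusiness-instance-id", // placeholder
        limit: 50,
        nextToken: nil                          // the paginator supplies subsequent tokens
    )
    for try await page in qapps.listQAppsPaginator(request) {
        print(page) // each page is one ListQAppsOutput
    }
}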
+ +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension QApps { + // MARK: Enums + + public enum AppRequiredCapability: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case creatorMode = "CreatorMode" + case fileUpload = "FileUpload" + case pluginMode = "PluginMode" + case retrievalMode = "RetrievalMode" + public var description: String { return self.rawValue } + } + + public enum AppStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deleted = "DELETED" + case draft = "DRAFT" + case published = "PUBLISHED" + public var description: String { return self.rawValue } + } + + public enum CardOutputSource: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case approvedSources = "approved-sources" + case llm = "llm" + public var description: String { return self.rawValue } + } + + public enum CardType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fileUpload = "file-upload" + case qPlugin = "q-plugin" + case qQuery = "q-query" + case textInput = "text-input" + public var description: String { return self.rawValue } + } + + public enum DocumentScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case application = "APPLICATION" + case session = "SESSION" + public var description: String { return self.rawValue } + } + + public enum ExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case inProgress = "IN_PROGRESS" + case waiting = "WAITING" + public var description: String { return self.rawValue } + } + + public enum LibraryItemStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case published = "PUBLISHED" + public var description: String { return self.rawValue } + } + + public enum PluginType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case custom = "CUSTOM" + case jira = "JIRA" + case salesforce = "SALESFORCE" + case serviceNow = "SERVICE_NOW" + case zendesk = "ZENDESK" + public var description: String { return self.rawValue } + } + + public enum Sender: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case system = "SYSTEM" + case user = "USER" + public var description: String { return self.rawValue } + } + + public enum Card: AWSDecodableShape, Sendable { + /// A container for the properties of the file upload card. + case fileUpload(FileUploadCard) + /// A container for the properties of the plugin card. + case qPlugin(QPluginCard) + /// A container for the properties of the query card. + case qQuery(QQueryCard) + /// A container for the properties of the text input card. 
+ case textInput(TextInputCard) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .fileUpload: + let value = try container.decode(FileUploadCard.self, forKey: .fileUpload) + self = .fileUpload(value) + case .qPlugin: + let value = try container.decode(QPluginCard.self, forKey: .qPlugin) + self = .qPlugin(value) + case .qQuery: + let value = try container.decode(QQueryCard.self, forKey: .qQuery) + self = .qQuery(value) + case .textInput: + let value = try container.decode(TextInputCard.self, forKey: .textInput) + self = .textInput(value) + } + } + + private enum CodingKeys: String, CodingKey { + case fileUpload = "fileUpload" + case qPlugin = "qPlugin" + case qQuery = "qQuery" + case textInput = "textInput" + } + } + + public enum CardInput: AWSEncodableShape & AWSDecodableShape, Sendable { + /// A container for the properties of the file upload input card. + case fileUpload(FileUploadCardInput) + /// A container for the properties of the plugin input card. + case qPlugin(QPluginCardInput) + /// A container for the properties of the query input card. + case qQuery(QQueryCardInput) + /// A container for the properties of the text input card. + case textInput(TextInputCardInput) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .fileUpload: + let value = try container.decode(FileUploadCardInput.self, forKey: .fileUpload) + self = .fileUpload(value) + case .qPlugin: + let value = try container.decode(QPluginCardInput.self, forKey: .qPlugin) + self = .qPlugin(value) + case .qQuery: + let value = try container.decode(QQueryCardInput.self, forKey: .qQuery) + self = .qQuery(value) + case .textInput: + let value = try container.decode(TextInputCardInput.self, forKey: .textInput) + self = .textInput(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .fileUpload(let value): + try container.encode(value, forKey: .fileUpload) + case .qPlugin(let value): + try container.encode(value, forKey: .qPlugin) + case .qQuery(let value): + try container.encode(value, forKey: .qQuery) + case .textInput(let value): + try container.encode(value, forKey: .textInput) + } + } + + public func validate(name: String) throws { + switch self { + case .fileUpload(let value): + try value.validate(name: "\(name).fileUpload") + case .qPlugin(let value): + try value.validate(name: "\(name).qPlugin") + case .qQuery(let value): + try value.validate(name: "\(name).qQuery") + case .textInput(let value): + try value.validate(name: "\(name).textInput") + } + } + + private enum CodingKeys: String, CodingKey { + case fileUpload = "fileUpload" + case qPlugin = "qPlugin" + case qQuery = "qQuery" + case textInput = "textInput" + } + } + + public enum DocumentAttributeValue: AWSEncodableShape & 
AWSDecodableShape, Sendable { + /// A date expressed as an ISO 8601 string. It's important for the time zone to be included in the ISO 8601 date-time format. For example, 2012-03-25T12:30:10+01:00 is the ISO 8601 date-time format for March 25th 2012 at 12:30PM (plus 10 seconds) in Central European Time. + case dateValue(Date) + /// A long integer value. + case longValue(Int64) + /// A list of strings. + case stringListValue([String]) + /// A string. + case stringValue(String) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .dateValue: + let value = try container.decode(Date.self, forKey: .dateValue) + self = .dateValue(value) + case .longValue: + let value = try container.decode(Int64.self, forKey: .longValue) + self = .longValue(value) + case .stringListValue: + let value = try container.decode([String].self, forKey: .stringListValue) + self = .stringListValue(value) + case .stringValue: + let value = try container.decode(String.self, forKey: .stringValue) + self = .stringValue(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .dateValue(let value): + try container.encode(value, forKey: .dateValue) + case .longValue(let value): + try container.encode(value, forKey: .longValue) + case .stringListValue(let value): + try container.encode(value, forKey: .stringListValue) + case .stringValue(let value): + try container.encode(value, forKey: .stringValue) + } + } + + public func validate(name: String) throws { + switch self { + case .stringListValue(let value): + try value.forEach { + try validate($0, name: "stringListValue[]", parent: name, max: 2048) + try validate($0, name: "stringListValue[]", parent: name, min: 1) + } + case .stringValue(let value): + try self.validate(value, name: "stringValue", parent: name, max: 2048) + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case dateValue = "dateValue" + case longValue = "longValue" + case stringListValue = "stringListValue" + case stringValue = "stringValue" + } + } + + public enum PredictQAppInputOptions: AWSEncodableShape, Sendable { + /// A conversation to use as input for generating the Q App definition. + case conversation([ConversationMessage]) + /// A problem statement to use as input for generating the Q App definition. + case problemStatement(String) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .conversation(let value): + try container.encode(value, forKey: .conversation) + case .problemStatement(let value): + try container.encode(value, forKey: .problemStatement) + } + } + + private enum CodingKeys: String, CodingKey { + case conversation = "conversation" + case problemStatement = "problemStatement" + } + } + + // MARK: Shapes + + public struct AppDefinition: AWSDecodableShape { + /// The version of the app definition schema or specification. + public let appDefinitionVersion: String + /// A flag indicating whether the Q App's definition can be edited by the user. + public let canEdit: Bool? 
+ /// The cards that make up the Q App, such as text input, file upload, or query cards. + public let cards: [Card] + + public init(appDefinitionVersion: String, canEdit: Bool? = nil, cards: [Card]) { + self.appDefinitionVersion = appDefinitionVersion + self.canEdit = canEdit + self.cards = cards + } + + private enum CodingKeys: String, CodingKey { + case appDefinitionVersion = "appDefinitionVersion" + case canEdit = "canEdit" + case cards = "cards" + } + } + + public struct AppDefinitionInput: AWSEncodableShape & AWSDecodableShape { + /// The cards that make up the Q App definition. + public let cards: [CardInput] + /// The initial prompt displayed when the Q App is started. + public let initialPrompt: String? + + public init(cards: [CardInput], initialPrompt: String? = nil) { + self.cards = cards + self.initialPrompt = initialPrompt + } + + public func validate(name: String) throws { + try self.cards.forEach { + try $0.validate(name: "\(name).cards[]") + } + try self.validate(self.cards, name: "cards", parent: name, max: 20) + try self.validate(self.initialPrompt, name: "initialPrompt", parent: name, max: 10000) + } + + private enum CodingKeys: String, CodingKey { + case cards = "cards" + case initialPrompt = "initialPrompt" + } + } + + public struct AssociateLibraryItemReviewInput: AWSEncodableShape { + /// The unique identifier for the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the library item to associate the review with. + public let libraryItemId: String + + public init(instanceId: String, libraryItemId: String) { + self.instanceId = instanceId + self.libraryItemId = libraryItemId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.libraryItemId, forKey: .libraryItemId) + } + + public func validate(name: String) throws { + try self.validate(self.libraryItemId, name: "libraryItemId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case libraryItemId = "libraryItemId" + } + } + + public struct AssociateQAppWithUserInput: AWSEncodableShape { + /// The ID of the Amazon Q App to associate with the user. + public let appId: String + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + + public init(appId: String, instanceId: String) { + self.appId = appId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + request.encodeHeader(self.instanceId, key: "instance-id") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + } + } + + public final class AttributeFilter: AWSEncodableShape & AWSDecodableShape { + /// Performs a logical AND operation on all supplied filters. + public let andAllFilters: [AttributeFilter]? 
+ /// Returns true when a document contains all the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue. + public let containsAll: DocumentAttribute? + /// Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue. + public let containsAny: DocumentAttribute? + /// Performs an equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue, longValue, stringListValue and stringValue. + public let equalsTo: DocumentAttribute? + /// Performs a greater than operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue. + public let greaterThan: DocumentAttribute? + /// Performs a greater than or equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue. + public let greaterThanOrEquals: DocumentAttribute? + /// Performs a less than operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue. + public let lessThan: DocumentAttribute? + /// Performs a less than or equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue and longValue. + public let lessThanOrEquals: DocumentAttribute? + /// Performs a logical NOT operation on all supplied filters. + public let notFilter: AttributeFilter? + /// Performs a logical OR operation on all supplied filters. + public let orAllFilters: [AttributeFilter]? + + public init(andAllFilters: [AttributeFilter]? = nil, containsAll: DocumentAttribute? = nil, containsAny: DocumentAttribute? = nil, equalsTo: DocumentAttribute? = nil, greaterThan: DocumentAttribute? = nil, greaterThanOrEquals: DocumentAttribute? = nil, lessThan: DocumentAttribute? = nil, lessThanOrEquals: DocumentAttribute? = nil, notFilter: AttributeFilter? = nil, orAllFilters: [AttributeFilter]?
= nil) { + self.andAllFilters = andAllFilters + self.containsAll = containsAll + self.containsAny = containsAny + self.equalsTo = equalsTo + self.greaterThan = greaterThan + self.greaterThanOrEquals = greaterThanOrEquals + self.lessThan = lessThan + self.lessThanOrEquals = lessThanOrEquals + self.notFilter = notFilter + self.orAllFilters = orAllFilters + } + + public func validate(name: String) throws { + try self.andAllFilters?.forEach { + try $0.validate(name: "\(name).andAllFilters[]") + } + try self.containsAll?.validate(name: "\(name).containsAll") + try self.containsAny?.validate(name: "\(name).containsAny") + try self.equalsTo?.validate(name: "\(name).equalsTo") + try self.greaterThan?.validate(name: "\(name).greaterThan") + try self.greaterThanOrEquals?.validate(name: "\(name).greaterThanOrEquals") + try self.lessThan?.validate(name: "\(name).lessThan") + try self.lessThanOrEquals?.validate(name: "\(name).lessThanOrEquals") + try self.notFilter?.validate(name: "\(name).notFilter") + try self.orAllFilters?.forEach { + try $0.validate(name: "\(name).orAllFilters[]") + } + } + + private enum CodingKeys: String, CodingKey { + case andAllFilters = "andAllFilters" + case containsAll = "containsAll" + case containsAny = "containsAny" + case equalsTo = "equalsTo" + case greaterThan = "greaterThan" + case greaterThanOrEquals = "greaterThanOrEquals" + case lessThan = "lessThan" + case lessThanOrEquals = "lessThanOrEquals" + case notFilter = "notFilter" + case orAllFilters = "orAllFilters" + } + } + + public struct CardStatus: AWSDecodableShape { + /// The current state of the card. + public let currentState: ExecutionStatus + /// The current value or result associated with the card. + public let currentValue: String + + public init(currentState: ExecutionStatus, currentValue: String) { + self.currentState = currentState + self.currentValue = currentValue + } + + private enum CodingKeys: String, CodingKey { + case currentState = "currentState" + case currentValue = "currentValue" + } + } + + public struct CardValue: AWSEncodableShape { + /// The unique identifier of the card. + public let cardId: String + /// The value or result associated with the card. + public let value: String + + public init(cardId: String, value: String) { + self.cardId = cardId + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.cardId, name: "cardId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case cardId = "cardId" + case value = "value" + } + } + + public struct Category: AWSDecodableShape { + /// The unique identifier of the category. + public let id: String + /// The title or name of the category. + public let title: String + + public init(id: String, title: String) { + self.id = id + self.title = title + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case title = "title" + } + } + + public struct ConversationMessage: AWSEncodableShape { + /// The text content of the conversation message. + public let body: String + /// The type of the conversation message. + public let type: Sender + + public init(body: String, type: Sender) { + self.body = body + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case body = "body" + case type = "type" + } + } + + public struct CreateLibraryItemInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q App to publish to the library. 
+ public let appId: String + /// The version of the Amazon Q App to publish to the library. + public let appVersion: Int + /// The categories to associate with the library item for easier discovery. + public let categories: [String] + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + + public init(appId: String, appVersion: Int, categories: [String], instanceId: String) { + self.appId = appId + self.appVersion = appVersion + self.categories = categories + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + try container.encode(self.appVersion, forKey: .appVersion) + try container.encode(self.categories, forKey: .categories) + request.encodeHeader(self.instanceId, key: "instance-id") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.appVersion, name: "appVersion", parent: name, max: 2147483647) + try self.validate(self.appVersion, name: "appVersion", parent: name, min: 0) + try self.categories.forEach { + try validate($0, name: "categories[]", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + try self.validate(self.categories, name: "categories", parent: name, max: 3) + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case appVersion = "appVersion" + case categories = "categories" + } + } + + public struct CreateLibraryItemOutput: AWSDecodableShape { + /// The date and time the library item was created. + @CustomCoding + public var createdAt: Date + /// The user who created the library item. + public let createdBy: String + /// The unique identifier of the new library item. + public let libraryItemId: String + /// The number of ratings the library item has received from users. + public let ratingCount: Int + /// The status of the new library item, such as "Published". + public let status: String + /// The date and time the library item was last updated. + @OptionalCustomCoding + public var updatedAt: Date? + /// The user who last updated the library item. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, libraryItemId: String, ratingCount: Int, status: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.libraryItemId = libraryItemId + self.ratingCount = ratingCount + self.status = status + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case libraryItemId = "libraryItemId" + case ratingCount = "ratingCount" + case status = "status" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateQAppInput: AWSEncodableShape { + /// The definition of the new Q App, specifying the cards and flow. + public let appDefinition: AppDefinitionInput + /// The description of the new Q App. + public let description: String? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// Optional tags to associate with the new Q App. 
+ public let tags: [String: String]? + /// The title of the new Q App. + public let title: String + + public init(appDefinition: AppDefinitionInput, description: String? = nil, instanceId: String, tags: [String: String]? = nil, title: String) { + self.appDefinition = appDefinition + self.description = description + self.instanceId = instanceId + self.tags = tags + self.title = title + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appDefinition, forKey: .appDefinition) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encodeIfPresent(self.tags, forKey: .tags) + try container.encode(self.title, forKey: .title) + } + + public func validate(name: String) throws { + try self.appDefinition.validate(name: "\(name).appDefinition") + try self.validate(self.description, name: "description", parent: name, max: 500) + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case appDefinition = "appDefinition" + case description = "description" + case tags = "tags" + case title = "title" + } + } + + public struct CreateQAppOutput: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the new Q App. + public let appArn: String + /// The unique identifier of the new Q App. + public let appId: String + /// The version of the new Q App. + public let appVersion: Int + /// The date and time the Q App was created. + @CustomCoding + public var createdAt: Date + /// The user who created the Q App. + public let createdBy: String + /// The description of the new Q App. + public let description: String? + /// The initial prompt displayed when the Q App is started. + public let initialPrompt: String? + /// The capabilities required to run the Q App, such as file upload or third-party integrations. + public let requiredCapabilities: [AppRequiredCapability]? + /// The status of the new Q App, such as "Created". + public let status: AppStatus + /// The title of the new Q App. + public let title: String + /// The date and time the Q App was last updated. + @CustomCoding + public var updatedAt: Date + /// The user who last updated the Q App. + public let updatedBy: String + + public init(appArn: String, appId: String, appVersion: Int, createdAt: Date, createdBy: String, description: String? = nil, initialPrompt: String? = nil, requiredCapabilities: [AppRequiredCapability]? 
= nil, status: AppStatus, title: String, updatedAt: Date, updatedBy: String) { + self.appArn = appArn + self.appId = appId + self.appVersion = appVersion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.initialPrompt = initialPrompt + self.requiredCapabilities = requiredCapabilities + self.status = status + self.title = title + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case appId = "appId" + case appVersion = "appVersion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case initialPrompt = "initialPrompt" + case requiredCapabilities = "requiredCapabilities" + case status = "status" + case title = "title" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct DeleteLibraryItemInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the library item to delete. + public let libraryItemId: String + + public init(instanceId: String, libraryItemId: String) { + self.instanceId = instanceId + self.libraryItemId = libraryItemId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.libraryItemId, forKey: .libraryItemId) + } + + public func validate(name: String) throws { + try self.validate(self.libraryItemId, name: "libraryItemId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case libraryItemId = "libraryItemId" + } + } + + public struct DeleteQAppInput: AWSEncodableShape { + /// The unique identifier of the Q App to delete. + public let appId: String + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + + public init(appId: String, instanceId: String) { + self.appId = appId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + request.encodeHeader(self.instanceId, key: "instance-id") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + } + } + + public struct DisassociateLibraryItemReviewInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the library item to remove the review from. + public let libraryItemId: String + + public init(instanceId: String, libraryItemId: String) { + self.instanceId = instanceId + self.libraryItemId = libraryItemId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.libraryItemId, forKey: .libraryItemId) + } + + public func validate(name: String) throws { + try self.validate(self.libraryItemId, name: "libraryItemId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case libraryItemId = "libraryItemId" + } + } + + public struct DisassociateQAppFromUserInput: AWSEncodableShape { + /// The unique identifier of the Q App to disassociate from the user. + public let appId: String + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + + public init(appId: String, instanceId: String) { + self.appId = appId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + request.encodeHeader(self.instanceId, key: "instance-id") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + } + } + + public struct DocumentAttribute: AWSEncodableShape & AWSDecodableShape { + /// The identifier for the attribute. + public let name: String + /// The value of the attribute. + public let value: DocumentAttributeValue + + public init(name: String, value: DocumentAttributeValue) { + self.name = name + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 200) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_][a-zA-Z0-9_-]*$") + try self.value.validate(name: "\(name).value") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + + public struct FileUploadCard: AWSDecodableShape { + /// A flag indicating if the user can override the default file for the upload card. + public let allowOverride: Bool? + /// Any dependencies or requirements for the file upload card. + public let dependencies: [String] + /// The unique identifier of the file associated with the card. + public let fileId: String? + /// The name of the file being uploaded. + public let filename: String? + /// The unique identifier of the file upload card. + public let id: String + /// The title of the file upload card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(allowOverride: Bool? = nil, dependencies: [String], fileId: String? = nil, filename: String? 
= nil, id: String, title: String, type: CardType) { + self.allowOverride = allowOverride + self.dependencies = dependencies + self.fileId = fileId + self.filename = filename + self.id = id + self.title = title + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case allowOverride = "allowOverride" + case dependencies = "dependencies" + case fileId = "fileId" + case filename = "filename" + case id = "id" + case title = "title" + case type = "type" + } + } + + public struct FileUploadCardInput: AWSEncodableShape & AWSDecodableShape { + /// A flag indicating if the user can override the default file for the upload card. + public let allowOverride: Bool? + /// The identifier of a pre-uploaded file associated with the card. + public let fileId: String? + /// The default filename to use for the file upload card. + public let filename: String? + /// The unique identifier of the file upload card. + public let id: String + /// The title or label of the file upload card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(allowOverride: Bool? = nil, fileId: String? = nil, filename: String? = nil, id: String, title: String, type: CardType) { + self.allowOverride = allowOverride + self.fileId = fileId + self.filename = filename + self.id = id + self.title = title + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.fileId, name: "fileId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.filename, name: "filename", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case allowOverride = "allowOverride" + case fileId = "fileId" + case filename = "filename" + case id = "id" + case title = "title" + case type = "type" + } + } + + public struct GetLibraryItemInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q App associated with the library item. + public let appId: String? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the library item to retrieve. + public let libraryItemId: String + + public init(appId: String? = nil, instanceId: String, libraryItemId: String) { + self.appId = appId + self.instanceId = instanceId + self.libraryItemId = libraryItemId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.appId, key: "appId") + request.encodeHeader(self.instanceId, key: "instance-id") + request.encodeQuery(self.libraryItemId, key: "libraryItemId") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.libraryItemId, name: "libraryItemId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetLibraryItemOutput: AWSDecodableShape { + /// The unique identifier of the Q App associated with the library item. 
+ public let appId: String + /// The version of the Q App associated with the library item. + public let appVersion: Int + /// The categories associated with the library item for discovery. + public let categories: [Category] + /// The date and time the library item was created. + @CustomCoding + public var createdAt: Date + /// The user who created the library item. + public let createdBy: String + /// Whether the current user has rated the library item. + public let isRatedByUser: Bool? + /// The unique identifier of the library item. + public let libraryItemId: String + /// The number of ratings the library item has received from users. + public let ratingCount: Int + /// The status of the library item, such as "Published". + public let status: String + /// The date and time the library item was last updated. + @OptionalCustomCoding + public var updatedAt: Date? + /// The user who last updated the library item. + public let updatedBy: String? + /// The number of users who have associated the Q App with their account. + public let userCount: Int? + + public init(appId: String, appVersion: Int, categories: [Category], createdAt: Date, createdBy: String, isRatedByUser: Bool? = nil, libraryItemId: String, ratingCount: Int, status: String, updatedAt: Date? = nil, updatedBy: String? = nil, userCount: Int? = nil) { + self.appId = appId + self.appVersion = appVersion + self.categories = categories + self.createdAt = createdAt + self.createdBy = createdBy + self.isRatedByUser = isRatedByUser + self.libraryItemId = libraryItemId + self.ratingCount = ratingCount + self.status = status + self.updatedAt = updatedAt + self.updatedBy = updatedBy + self.userCount = userCount + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case appVersion = "appVersion" + case categories = "categories" + case createdAt = "createdAt" + case createdBy = "createdBy" + case isRatedByUser = "isRatedByUser" + case libraryItemId = "libraryItemId" + case ratingCount = "ratingCount" + case status = "status" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + case userCount = "userCount" + } + } + + public struct GetQAppInput: AWSEncodableShape { + /// The unique identifier of the Q App to retrieve. + public let appId: String + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + + public init(appId: String, instanceId: String) { + self.appId = appId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.appId, key: "appId") + request.encodeHeader(self.instanceId, key: "instance-id") + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetQAppOutput: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Q App. + public let appArn: String + /// The full definition of the Q App, specifying the cards and flow. + public let appDefinition: AppDefinition + /// The unique identifier of the Q App. + public let appId: String + /// The version of the Q App. + public let appVersion: Int + /// The date and time the Q App was created. + @CustomCoding + public var createdAt: Date + /// The user who created the Q App. 
+ public let createdBy: String + /// The description of the Q App. + public let description: String? + /// The initial prompt displayed when the Q App is started. + public let initialPrompt: String? + /// The capabilities required to run the Q App, such as file upload or third-party integrations. + public let requiredCapabilities: [AppRequiredCapability]? + /// The status of the Q App. + public let status: AppStatus + /// The title of the Q App. + public let title: String + /// The date and time the Q App was last updated. + @CustomCoding + public var updatedAt: Date + /// The user who last updated the Q App. + public let updatedBy: String + + public init(appArn: String, appDefinition: AppDefinition, appId: String, appVersion: Int, createdAt: Date, createdBy: String, description: String? = nil, initialPrompt: String? = nil, requiredCapabilities: [AppRequiredCapability]? = nil, status: AppStatus, title: String, updatedAt: Date, updatedBy: String) { + self.appArn = appArn + self.appDefinition = appDefinition + self.appId = appId + self.appVersion = appVersion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.initialPrompt = initialPrompt + self.requiredCapabilities = requiredCapabilities + self.status = status + self.title = title + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case appDefinition = "appDefinition" + case appId = "appId" + case appVersion = "appVersion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case initialPrompt = "initialPrompt" + case requiredCapabilities = "requiredCapabilities" + case status = "status" + case title = "title" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetQAppSessionInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the Q App session to retrieve. + public let sessionId: String + + public init(instanceId: String, sessionId: String) { + self.instanceId = instanceId + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + request.encodeQuery(self.sessionId, key: "sessionId") + } + + public func validate(name: String) throws { + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetQAppSessionOutput: AWSDecodableShape { + /// The current status for each card in the Q App session. + public let cardStatus: [String: CardStatus] + /// The Amazon Resource Name (ARN) of the Q App session. + public let sessionArn: String + /// The unique identifier of the Q App session. + public let sessionId: String + /// The current status of the Q App session. 
+ public let status: ExecutionStatus + + public init(cardStatus: [String: CardStatus], sessionArn: String, sessionId: String, status: ExecutionStatus) { + self.cardStatus = cardStatus + self.sessionArn = sessionArn + self.sessionId = sessionId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case cardStatus = "cardStatus" + case sessionArn = "sessionArn" + case sessionId = "sessionId" + case status = "status" + } + } + + public struct ImportDocumentInput: AWSEncodableShape { + /// The unique identifier of the Q App the file is associated with. + public let appId: String + /// The unique identifier of the card the file is associated with, if applicable. + public let cardId: String + /// The base64-encoded contents of the file to upload. + public let fileContentsBase64: String + /// The name of the file being uploaded. + public let fileName: String + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// Whether the file is associated with a Q App definition or a specific Q App session. + public let scope: DocumentScope + /// The unique identifier of the Q App session the file is associated with, if applicable. + public let sessionId: String? + + public init(appId: String, cardId: String, fileContentsBase64: String, fileName: String, instanceId: String, scope: DocumentScope, sessionId: String? = nil) { + self.appId = appId + self.cardId = cardId + self.fileContentsBase64 = fileContentsBase64 + self.fileName = fileName + self.instanceId = instanceId + self.scope = scope + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + try container.encode(self.cardId, forKey: .cardId) + try container.encode(self.fileContentsBase64, forKey: .fileContentsBase64) + try container.encode(self.fileName, forKey: .fileName) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.scope, forKey: .scope) + try container.encodeIfPresent(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.cardId, name: "cardId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.fileName, name: "fileName", parent: name, max: 100) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case cardId = "cardId" + case fileContentsBase64 = "fileContentsBase64" + case fileName = "fileName" + case scope = "scope" + case sessionId = "sessionId" + } + } + + public struct ImportDocumentOutput: AWSDecodableShape { + /// The unique identifier assigned to the uploaded file. + public let fileId: String? + + public init(fileId: String? = nil) { + self.fileId = fileId + } + + private enum CodingKeys: String, CodingKey { + case fileId = "fileId" + } + } + + public struct LibraryItemMember: AWSDecodableShape { + /// The unique identifier of the Q App associated with the library item.
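// --- Editorial usage sketch (not part of the generated patch): uploading a file with the
// ImportDocumentInput shape above. It assumes a generated `importDocument` method on the `QApps`
// client and that DocumentScope exposes a `.session` case for session-scoped uploads; treat both
// names as assumptions to check against the generated API.
import Foundation
import SotoQApps

func uploadFileToSession(qapps: QApps, instanceId: String, appId: String,
                         cardId: String, sessionId: String, fileURL: URL) async throws -> String? {
    // The file travels as a base64 string in the JSON body, while instance-id is sent as an
    // HTTP header (see the shape's encode(to:) above).
    let base64 = try Data(contentsOf: fileURL).base64EncodedString()
    let input = ImportDocumentInput(
        appId: appId,
        cardId: cardId,
        fileContentsBase64: base64,
        fileName: fileURL.lastPathComponent,
        instanceId: instanceId,
        scope: .session,   // assumed case name for a session-scoped upload
        sessionId: sessionId
    )
    let output = try await qapps.importDocument(input)
    return output.fileId   // identifier assigned to the uploaded file, if any
}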
+ public let appId: String + /// The version of the Q App associated with the library item. + public let appVersion: Int + /// The categories associated with the library item. + public let categories: [Category] + /// The date and time the library item was created. + @CustomCoding + public var createdAt: Date + /// The user who created the library item. + public let createdBy: String + /// Whether the current user has rated the library item. + public let isRatedByUser: Bool? + /// The unique identifier of the library item. + public let libraryItemId: String + /// The number of ratings the library item has received. + public let ratingCount: Int + /// The status of the library item. + public let status: String + /// The date and time the library item was last updated. + @OptionalCustomCoding + public var updatedAt: Date? + /// The user who last updated the library item. + public let updatedBy: String? + /// The number of users who have the associated Q App. + public let userCount: Int? + + public init(appId: String, appVersion: Int, categories: [Category], createdAt: Date, createdBy: String, isRatedByUser: Bool? = nil, libraryItemId: String, ratingCount: Int, status: String, updatedAt: Date? = nil, updatedBy: String? = nil, userCount: Int? = nil) { + self.appId = appId + self.appVersion = appVersion + self.categories = categories + self.createdAt = createdAt + self.createdBy = createdBy + self.isRatedByUser = isRatedByUser + self.libraryItemId = libraryItemId + self.ratingCount = ratingCount + self.status = status + self.updatedAt = updatedAt + self.updatedBy = updatedBy + self.userCount = userCount + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case appVersion = "appVersion" + case categories = "categories" + case createdAt = "createdAt" + case createdBy = "createdBy" + case isRatedByUser = "isRatedByUser" + case libraryItemId = "libraryItemId" + case ratingCount = "ratingCount" + case status = "status" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + case userCount = "userCount" + } + } + + public struct ListLibraryItemsInput: AWSEncodableShape { + /// Optional category to filter the library items by. + public let categoryId: String? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The maximum number of library items to return in the response. + public let limit: Int? + /// The token to request the next page of results. + public let nextToken: String? + + public init(categoryId: String? = nil, instanceId: String, limit: Int? = nil, nextToken: String? = nil) { + self.categoryId = categoryId + self.instanceId = instanceId + self.limit = limit + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.categoryId, key: "categoryId") + request.encodeHeader(self.instanceId, key: "instance-id") + request.encodeQuery(self.limit, key: "limit") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.categoryId, name: "categoryId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.limit, name: "limit", parent: name, max: 100) + try self.validate(self.limit, name: "limit", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 300) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListLibraryItemsOutput: AWSDecodableShape { + /// The list of library items meeting the request criteria. + public let libraryItems: [LibraryItemMember]? + /// The token to use to request the next page of results. + public let nextToken: String? + + public init(libraryItems: [LibraryItemMember]? = nil, nextToken: String? = nil) { + self.libraryItems = libraryItems + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case libraryItems = "libraryItems" + case nextToken = "nextToken" + } + } + + public struct ListQAppsInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The maximum number of Q Apps to return in the response. + public let limit: Int? + /// The token to request the next page of results. + public let nextToken: String? + + public init(instanceId: String, limit: Int? = nil, nextToken: String? = nil) { + self.instanceId = instanceId + self.limit = limit + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + request.encodeQuery(self.limit, key: "limit") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.limit, name: "limit", parent: name, max: 100) + try self.validate(self.limit, name: "limit", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 300) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListQAppsOutput: AWSDecodableShape { + /// The list of Amazon Q Apps meeting the request criteria. + public let apps: [UserAppItem] + /// The token to use to request the next page of results. + public let nextToken: String? + + public init(apps: [UserAppItem], nextToken: String? = nil) { + self.apps = apps + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case apps = "apps" + case nextToken = "nextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource whose tags should be listed. + public let resourceARN: String + + public init(resourceARN: String) { + self.resourceARN = resourceARN + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceARN, key: "resourceARN") + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The list of tags that are assigned to the resource. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct PredictAppDefinition: AWSDecodableShape { + /// The definition specifying the cards and flow of the generated Q App. + public let appDefinition: AppDefinitionInput + /// The description of the generated Q App definition. + public let description: String? + /// The title of the generated Q App definition. + public let title: String + + public init(appDefinition: AppDefinitionInput, description: String? = nil, title: String) { + self.appDefinition = appDefinition + self.description = description + self.title = title + } + + private enum CodingKeys: String, CodingKey { + case appDefinition = "appDefinition" + case description = "description" + case title = "title" + } + } + + public struct PredictQAppInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The input to generate the Q App definition from, either a conversation or problem statement. + public let options: PredictQAppInputOptions? + + public init(instanceId: String, options: PredictQAppInputOptions? = nil) { + self.instanceId = instanceId + self.options = options + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encodeIfPresent(self.options, forKey: .options) + } + + private enum CodingKeys: String, CodingKey { + case options = "options" + } + } + + public struct PredictQAppOutput: AWSDecodableShape { + /// The generated Q App definition. + public let app: PredictAppDefinition + /// The problem statement extracted from the input conversation, if provided. + public let problemStatement: String + + public init(app: PredictAppDefinition, problemStatement: String) { + self.app = app + self.problemStatement = problemStatement + } + + private enum CodingKeys: String, CodingKey { + case app = "app" + case problemStatement = "problemStatement" + } + } + + public struct QPluginCard: AWSDecodableShape { + /// Any dependencies or requirements for the plugin card. + public let dependencies: [String] + /// The unique identifier of the plugin card. + public let id: String + /// The unique identifier of the plugin used by the card. + public let pluginId: String + /// The type or category of the plugin used by the card. + public let pluginType: PluginType + /// The prompt or instructions displayed for the plugin card. + public let prompt: String + /// The title or label of the plugin card. + public let title: String + /// The type of the card. 
+ public let type: CardType + + public init(dependencies: [String], id: String, pluginId: String, pluginType: PluginType, prompt: String, title: String, type: CardType) { + self.dependencies = dependencies + self.id = id + self.pluginId = pluginId + self.pluginType = pluginType + self.prompt = prompt + self.title = title + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case dependencies = "dependencies" + case id = "id" + case pluginId = "pluginId" + case pluginType = "pluginType" + case prompt = "prompt" + case title = "title" + case type = "type" + } + } + + public struct QPluginCardInput: AWSEncodableShape & AWSDecodableShape { + /// The unique identifier of the plugin card. + public let id: String + /// The unique identifier of the plugin used by the card. + public let pluginId: String + /// The prompt or instructions displayed for the plugin card. + public let prompt: String + /// The title or label of the plugin card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(id: String, pluginId: String, prompt: String, title: String, type: CardType) { + self.id = id + self.pluginId = pluginId + self.prompt = prompt + self.title = title + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.pluginId, name: "pluginId", parent: name, max: 36) + try self.validate(self.pluginId, name: "pluginId", parent: name, min: 36) + try self.validate(self.prompt, name: "prompt", parent: name, max: 7000) + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case pluginId = "pluginId" + case prompt = "prompt" + case title = "title" + case type = "type" + } + } + + public struct QQueryCard: AWSDecodableShape { + /// The Amazon Q Business filters applied in this query card when resolving data sources + public let attributeFilter: AttributeFilter? + /// Any dependencies or requirements for the query card. + public let dependencies: [String] + /// The unique identifier of the query card. + public let id: String + /// The source or type of output generated by the query card. + public let outputSource: CardOutputSource + /// The prompt or instructions displayed for the query card. + public let prompt: String + /// The title or label of the query card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(attributeFilter: AttributeFilter? = nil, dependencies: [String], id: String, outputSource: CardOutputSource, prompt: String, title: String, type: CardType) { + self.attributeFilter = attributeFilter + self.dependencies = dependencies + self.id = id + self.outputSource = outputSource + self.prompt = prompt + self.title = title + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case attributeFilter = "attributeFilter" + case dependencies = "dependencies" + case id = "id" + case outputSource = "outputSource" + case prompt = "prompt" + case title = "title" + case type = "type" + } + } + + public struct QQueryCardInput: AWSEncodableShape & AWSDecodableShape { + /// Turns on filtering of responses based on document attributes or metadata fields. + public let attributeFilter: AttributeFilter? + /// The unique identifier of the query card. 
+ public let id: String + /// The source or type of output to generate for the query card. + public let outputSource: CardOutputSource? + /// The prompt or instructions displayed for the query card. + public let prompt: String + /// The title or label of the query card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(attributeFilter: AttributeFilter? = nil, id: String, outputSource: CardOutputSource? = nil, prompt: String, title: String, type: CardType) { + self.attributeFilter = attributeFilter + self.id = id + self.outputSource = outputSource + self.prompt = prompt + self.title = title + self.type = type + } + + public func validate(name: String) throws { + try self.attributeFilter?.validate(name: "\(name).attributeFilter") + try self.validate(self.id, name: "id", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.prompt, name: "prompt", parent: name, max: 7000) + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case attributeFilter = "attributeFilter" + case id = "id" + case outputSource = "outputSource" + case prompt = "prompt" + case title = "title" + case type = "type" + } + } + + public struct StartQAppSessionInput: AWSEncodableShape { + /// The unique identifier of the Q App to start a session for. + public let appId: String + /// The version of the Q App to use for the session. + public let appVersion: Int + /// Optional initial input values to provide for the Q App session. + public let initialValues: [CardValue]? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// Optional tags to associate with the new Q App session. + public let tags: [String: String]? + + public init(appId: String, appVersion: Int, initialValues: [CardValue]? = nil, instanceId: String, tags: [String: String]? = nil) { + self.appId = appId + self.appVersion = appVersion + self.initialValues = initialValues + self.instanceId = instanceId + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.appId, forKey: .appId) + try container.encode(self.appVersion, forKey: .appVersion) + try container.encodeIfPresent(self.initialValues, forKey: .initialValues) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encodeIfPresent(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.appVersion, name: "appVersion", parent: name, max: 2147483647) + try self.validate(self.appVersion, name: "appVersion", parent: name, min: 0) + try self.initialValues?.forEach { + try $0.validate(name: "\(name).initialValues[]") + } + try self.validate(self.initialValues, name: "initialValues", parent: name, max: 20) + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case appVersion = "appVersion" + case initialValues = "initialValues" + case tags = "tags" + } + } + + public struct StartQAppSessionOutput: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the new Q App session. 
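// --- Editorial usage sketch (not part of the generated patch): driving a Q App run with the
// session shapes above. The `startQAppSession` and `getQAppSession` method names are assumed to
// follow Soto's generated naming for the StartQAppSession and GetQAppSession operations.
import SotoQApps

func runQApp(qapps: QApps, instanceId: String, appId: String, appVersion: Int,
             textCardId: String, text: String) async throws {
    // Seed the session with a value for one input card; card IDs are the UUIDs from the app
    // definition, and at most 20 initial values are accepted by the shape's validate().
    let start = try await qapps.startQAppSession(StartQAppSessionInput(
        appId: appId,
        appVersion: appVersion,
        initialValues: [CardValue(cardId: textCardId, value: text)],
        instanceId: instanceId
    ))
    // Fetch the current card states; in practice you would poll until the session's
    // ExecutionStatus reaches a terminal value.
    let session = try await qapps.getQAppSession(GetQAppSessionInput(
        instanceId: instanceId,
        sessionId: start.sessionId
    ))
    for (cardId, card) in session.cardStatus {
        print("card \(cardId): \(card.currentState) -> \(card.currentValue)")
    }
}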
+ public let sessionArn: String + /// The unique identifier of the new Q App session. + public let sessionId: String + + public init(sessionArn: String, sessionId: String) { + self.sessionArn = sessionArn + self.sessionId = sessionId + } + + private enum CodingKeys: String, CodingKey { + case sessionArn = "sessionArn" + case sessionId = "sessionId" + } + } + + public struct StopQAppSessionInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the Q App session to stop. + public let sessionId: String + + public init(instanceId: String, sessionId: String) { + self.instanceId = instanceId + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case sessionId = "sessionId" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to tag. + public let resourceARN: String + /// The tags to associate with the resource. + public let tags: [String: String] + + public init(resourceARN: String, tags: [String: String]) { + self.resourceARN = resourceARN + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceARN, key: "resourceARN") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TextInputCard: AWSDecodableShape { + /// The default value to pre-populate in the text input field. + public let defaultValue: String? + /// Any dependencies or requirements for the text input card. + public let dependencies: [String] + /// The unique identifier of the text input card. + public let id: String + /// The placeholder text to display in the text input field. + public let placeholder: String? + /// The title or label of the text input card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(defaultValue: String? = nil, dependencies: [String], id: String, placeholder: String? 
= nil, title: String, type: CardType) { + self.defaultValue = defaultValue + self.dependencies = dependencies + self.id = id + self.placeholder = placeholder + self.title = title + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case defaultValue = "defaultValue" + case dependencies = "dependencies" + case id = "id" + case placeholder = "placeholder" + case title = "title" + case type = "type" + } + } + + public struct TextInputCardInput: AWSEncodableShape & AWSDecodableShape { + /// The default value to pre-populate in the text input field. + public let defaultValue: String? + /// The unique identifier of the text input card. + public let id: String + /// The placeholder text to display in the text input field. + public let placeholder: String? + /// The title or label of the text input card. + public let title: String + /// The type of the card. + public let type: CardType + + public init(defaultValue: String? = nil, id: String, placeholder: String? = nil, title: String, type: CardType) { + self.defaultValue = defaultValue + self.id = id + self.placeholder = placeholder + self.title = title + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.defaultValue, name: "defaultValue", parent: name, max: 500) + try self.validate(self.id, name: "id", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.placeholder, name: "placeholder", parent: name, max: 500) + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case defaultValue = "defaultValue" + case id = "id" + case placeholder = "placeholder" + case title = "title" + case type = "type" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to disassociate the tag from. + public let resourceARN: String + /// The keys of the tags to disassociate from the resource. + public let tagKeys: [String] + + public init(resourceARN: String, tagKeys: [String]) { + self.resourceARN = resourceARN + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceARN, key: "resourceARN") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLibraryItemInput: AWSEncodableShape { + /// The new categories to associate with the library item. + public let categories: [String]? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the library item to update. + public let libraryItemId: String + /// The new status to set for the library item, such as "Published" or "Hidden". + public let status: LibraryItemStatus? 
+ + public init(categories: [String]? = nil, instanceId: String, libraryItemId: String, status: LibraryItemStatus? = nil) { + self.categories = categories + self.instanceId = instanceId + self.libraryItemId = libraryItemId + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.categories, forKey: .categories) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.libraryItemId, forKey: .libraryItemId) + try container.encodeIfPresent(self.status, forKey: .status) + } + + public func validate(name: String) throws { + try self.categories?.forEach { + try validate($0, name: "categories[]", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + try self.validate(self.categories, name: "categories", parent: name, max: 3) + try self.validate(self.libraryItemId, name: "libraryItemId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case categories = "categories" + case libraryItemId = "libraryItemId" + case status = "status" + } + } + + public struct UpdateLibraryItemOutput: AWSDecodableShape { + /// The unique identifier of the Q App associated with the library item. + public let appId: String + /// The version of the Q App associated with the library item. + public let appVersion: Int + /// The categories associated with the updated library item. + public let categories: [Category] + /// The date and time the library item was originally created. + @CustomCoding + public var createdAt: Date + /// The user who originally created the library item. + public let createdBy: String + /// Whether the current user has rated the library item. + public let isRatedByUser: Bool? + /// The unique identifier of the updated library item. + public let libraryItemId: String + /// The number of ratings the library item has received. + public let ratingCount: Int + /// The new status of the updated library item. + public let status: String + /// The date and time the library item was last updated. + @OptionalCustomCoding + public var updatedAt: Date? + /// The user who last updated the library item. + public let updatedBy: String? + /// The number of users who have the associated Q App. + public let userCount: Int? + + public init(appId: String, appVersion: Int, categories: [Category], createdAt: Date, createdBy: String, isRatedByUser: Bool? = nil, libraryItemId: String, ratingCount: Int, status: String, updatedAt: Date? = nil, updatedBy: String? = nil, userCount: Int? 
= nil) { + self.appId = appId + self.appVersion = appVersion + self.categories = categories + self.createdAt = createdAt + self.createdBy = createdBy + self.isRatedByUser = isRatedByUser + self.libraryItemId = libraryItemId + self.ratingCount = ratingCount + self.status = status + self.updatedAt = updatedAt + self.updatedBy = updatedBy + self.userCount = userCount + } + + private enum CodingKeys: String, CodingKey { + case appId = "appId" + case appVersion = "appVersion" + case categories = "categories" + case createdAt = "createdAt" + case createdBy = "createdBy" + case isRatedByUser = "isRatedByUser" + case libraryItemId = "libraryItemId" + case ratingCount = "ratingCount" + case status = "status" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + case userCount = "userCount" + } + } + + public struct UpdateQAppInput: AWSEncodableShape { + /// The new definition specifying the cards and flow for the Q App. + public let appDefinition: AppDefinitionInput? + /// The unique identifier of the Q App to update. + public let appId: String + /// The new description for the Q App. + public let description: String? + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The new title for the Q App. + public let title: String? + + public init(appDefinition: AppDefinitionInput? = nil, appId: String, description: String? = nil, instanceId: String, title: String? = nil) { + self.appDefinition = appDefinition + self.appId = appId + self.description = description + self.instanceId = instanceId + self.title = title + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.appDefinition, forKey: .appDefinition) + try container.encode(self.appId, forKey: .appId) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encodeIfPresent(self.title, forKey: .title) + } + + public func validate(name: String) throws { + try self.appDefinition?.validate(name: "\(name).appDefinition") + try self.validate(self.appId, name: "appId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.validate(self.description, name: "description", parent: name, max: 500) + try self.validate(self.title, name: "title", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case appDefinition = "appDefinition" + case appId = "appId" + case description = "description" + case title = "title" + } + } + + public struct UpdateQAppOutput: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the updated Q App. + public let appArn: String + /// The unique identifier of the updated Q App. + public let appId: String + /// The new version of the updated Q App. + public let appVersion: Int + /// The date and time the Q App was originally created. + @CustomCoding + public var createdAt: Date + /// The user who originally created the Q App. + public let createdBy: String + /// The new description of the updated Q App. + public let description: String? + /// The initial prompt for the updated Q App. + public let initialPrompt: String? + /// The capabilities required for the updated Q App. + public let requiredCapabilities: [AppRequiredCapability]? + /// The status of the updated Q App. 
+ public let status: AppStatus + /// The new title of the updated Q App. + public let title: String + /// The date and time the Q App was last updated. + @CustomCoding + public var updatedAt: Date + /// The user who last updated the Q App. + public let updatedBy: String + + public init(appArn: String, appId: String, appVersion: Int, createdAt: Date, createdBy: String, description: String? = nil, initialPrompt: String? = nil, requiredCapabilities: [AppRequiredCapability]? = nil, status: AppStatus, title: String, updatedAt: Date, updatedBy: String) { + self.appArn = appArn + self.appId = appId + self.appVersion = appVersion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.initialPrompt = initialPrompt + self.requiredCapabilities = requiredCapabilities + self.status = status + self.title = title + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case appId = "appId" + case appVersion = "appVersion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case initialPrompt = "initialPrompt" + case requiredCapabilities = "requiredCapabilities" + case status = "status" + case title = "title" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct UpdateQAppSessionInput: AWSEncodableShape { + /// The unique identifier of the Amazon Q Business application environment instance. + public let instanceId: String + /// The unique identifier of the Q App session to provide input for. + public let sessionId: String + /// The input values to provide for the current state of the Q App session. + public let values: [CardValue]? + + public init(instanceId: String, sessionId: String, values: [CardValue]? = nil) { + self.instanceId = instanceId + self.sessionId = sessionId + self.values = values + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.instanceId, key: "instance-id") + try container.encode(self.sessionId, forKey: .sessionId) + try container.encodeIfPresent(self.values, forKey: .values) + } + + public func validate(name: String) throws { + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$") + try self.values?.forEach { + try $0.validate(name: "\(name).values[]") + } + try self.validate(self.values, name: "values", parent: name, max: 20) + } + + private enum CodingKeys: String, CodingKey { + case sessionId = "sessionId" + case values = "values" + } + } + + public struct UpdateQAppSessionOutput: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the updated Q App session. + public let sessionArn: String + /// The unique identifier of the updated Q App session. + public let sessionId: String + + public init(sessionArn: String, sessionId: String) { + self.sessionArn = sessionArn + self.sessionId = sessionId + } + + private enum CodingKeys: String, CodingKey { + case sessionArn = "sessionArn" + case sessionId = "sessionId" + } + } + + public struct UserAppItem: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Q App. + public let appArn: String + /// The unique identifier of the Q App. + public let appId: String + /// A flag indicating whether the user can edit the Q App. + public let canEdit: Bool? 
+ /// The date and time the user's association with the Q App was created. + @CustomCoding + public var createdAt: Date + /// The description of the Q App. + public let description: String? + /// The status of the user's association with the Q App. + public let status: String? + /// The title of the Q App. + public let title: String + + public init(appArn: String, appId: String, canEdit: Bool? = nil, createdAt: Date, description: String? = nil, status: String? = nil, title: String) { + self.appArn = appArn + self.appId = appId + self.canEdit = canEdit + self.createdAt = createdAt + self.description = description + self.status = status + self.title = title + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case appId = "appId" + case canEdit = "canEdit" + case createdAt = "createdAt" + case description = "description" + case status = "status" + case title = "title" + } + } +} + +// MARK: - Errors + +/// Error enum for QApps +public struct QAppsErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case contentTooLargeException = "ContentTooLargeException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case unauthorizedException = "UnauthorizedException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize QApps + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// The client is not authorized to perform the requested operation. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The requested operation could not be completed due to a conflict with the current state of the resource. + public static var conflictException: Self { .init(.conflictException) } + /// The requested operation could not be completed because the content exceeds the maximum allowed size. + public static var contentTooLargeException: Self { .init(.contentTooLargeException) } + /// An internal service error occurred while processing the request. + public static var internalServerException: Self { .init(.internalServerException) } + /// The requested resource could not be found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The requested operation could not be completed because it would exceed the service's quota or limit. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The requested operation could not be completed because too many requests were sent at once. Wait a bit and try again later. + public static var throttlingException: Self { .init(.throttlingException) } + /// The client is not authenticated or authorized to perform the requested operation. + public static var unauthorizedException: Self { .init(.unauthorizedException) } + /// The input failed to satisfy the constraints specified by the service. 
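// --- Editorial usage sketch (not part of the generated patch): matching these typed errors at a
// call site. The `deleteQApp` method name is assumed from Soto's generated naming for the
// DeleteQApp operation; the equality check relies on the Equatable conformance declared below.
import SotoQApps

func deleteIgnoringMissing(qapps: QApps, instanceId: String, appId: String) async throws {
    do {
        try await qapps.deleteQApp(DeleteQAppInput(appId: appId, instanceId: instanceId))
    } catch let error as QAppsErrorType where error == .resourceNotFoundException {
        // The app was already deleted; any other QApps error (throttling, validation, ...)
        // propagates to the caller.
    }
}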
+ public static var validationException: Self { .init(.validationException) } +} + +extension QAppsErrorType: Equatable { + public static func == (lhs: QAppsErrorType, rhs: QAppsErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension QAppsErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift index b351db3a35..4db96a8947 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift @@ -73,6 +73,32 @@ public struct QuickSight: AWSService { // MARK: API Calls + /// Creates new reviewed answers for a Q Topic. + @Sendable + public func batchCreateTopicReviewedAnswer(_ input: BatchCreateTopicReviewedAnswerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchCreateTopicReviewedAnswerResponse { + return try await self.client.execute( + operation: "BatchCreateTopicReviewedAnswer", + path: "/accounts/{AwsAccountId}/topics/{TopicId}/batch-create-reviewed-answers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes reviewed answers for Q Topic. + @Sendable + public func batchDeleteTopicReviewedAnswer(_ input: BatchDeleteTopicReviewedAnswerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDeleteTopicReviewedAnswerResponse { + return try await self.client.execute( + operation: "BatchDeleteTopicReviewedAnswer", + path: "/accounts/{AwsAccountId}/topics/{TopicId}/batch-delete-reviewed-answers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels an ongoing ingestion of data into SPICE. @Sendable public func cancelIngestion(_ input: CancelIngestionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelIngestionResponse { @@ -1697,6 +1723,19 @@ public struct QuickSight: AWSService { ) } + /// Lists all reviewed answers for a Q Topic. + @Sendable + public func listTopicReviewedAnswers(_ input: ListTopicReviewedAnswersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTopicReviewedAnswersResponse { + return try await self.client.execute( + operation: "ListTopicReviewedAnswers", + path: "/accounts/{AwsAccountId}/topics/{TopicId}/reviewed-answers", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists all of the topics within an account. 
@Sendable public func listTopics(_ input: ListTopicsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTopicsResponse { diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 99adf0623a..9e22ecf9f9 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -26,6 +26,30 @@ import Foundation extension QuickSight { // MARK: Enums + public enum AggType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `var` = "VAR" + case average = "AVERAGE" + case column = "COLUMN" + case count = "COUNT" + case custom = "CUSTOM" + case distinctCount = "DISTINCT_COUNT" + case max = "MAX" + case median = "MEDIAN" + case min = "MIN" + case percentile = "PERCENTILE" + case ptdAverage = "PTD_AVERAGE" + case ptdCount = "PTD_COUNT" + case ptdDistinctCount = "PTD_DISTINCT_COUNT" + case ptdMax = "PTD_MAX" + case ptdMin = "PTD_MIN" + case ptdSum = "PTD_SUM" + case stdev = "STDEV" + case stdevp = "STDEVP" + case sum = "SUM" + case varp = "VARP" + public var description: String { return self.rawValue } + } + public enum AnalysisErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDenied = "ACCESS_DENIED" case columnGeographicRoleMismatch = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" @@ -56,6 +80,11 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum AnchorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case today = "TODAY" + public var description: String { return self.rawValue } + } + public enum ArcThickness: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case large = "LARGE" case medium = "MEDIUM" @@ -306,6 +335,20 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum ComparisonMethodType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case diff = "DIFF" + case diffAsPerc = "DIFF_AS_PERC" + case movingAverage = "MOVING_AVERAGE" + case percDiff = "PERC_DIFF" + case percentOfTotal = "PERCENT_OF_TOTAL" + case popCurrentDiff = "POP_CURRENT_DIFF" + case popCurrentDiffAsPerc = "POP_CURRENT_DIFF_AS_PERC" + case popOvertimeDiff = "POP_OVERTIME_DIFF" + case popOvertimeDiffAsPerc = "POP_OVERTIME_DIFF_AS_PERC" + case runningSum = "RUNNING_SUM" + public var description: String { return self.rawValue } + } + public enum ConditionalFormattingIconDisplayOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case iconOnly = "ICON_ONLY" public var description: String { return self.rawValue } @@ -333,6 +376,21 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum ContributionAnalysisDirection: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case decrease = "DECREASE" + case increase = "INCREASE" + case neutral = "NEUTRAL" + public var description: String { return self.rawValue } + } + + public enum ContributionAnalysisSortType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case absoluteDifference = "ABSOLUTE_DIFFERENCE" + case contributionPercentage = "CONTRIBUTION_PERCENTAGE" + case deviationFromExpected = "DEVIATION_FROM_EXPECTED" + case percentageDifference = "PERCENTAGE_DIFFERENCE" + public var description: String { return self.rawValue } + } + public enum 
CrossDatasetTypes: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case allDatasets = "ALL_DATASETS" case singleDataset = "SINGLE_DATASET" @@ -1000,6 +1058,13 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum NullFilterOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allValues = "ALL_VALUES" + case nonNullsOnly = "NON_NULLS_ONLY" + case nullsOnly = "NULLS_ONLY" + public var description: String { return self.rawValue } + } + public enum NumberScale: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case auto = "AUTO" case billions = "BILLIONS" @@ -1224,6 +1289,17 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum ReviewedAnswerErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case datasetDoesNotExist = "DATASET_DOES_NOT_EXIST" + case duplicatedAnswer = "DUPLICATED_ANSWER" + case internalError = "INTERNAL_ERROR" + case invalidData = "INVALID_DATA" + case invalidDatasetArn = "INVALID_DATASET_ARN" + case missingAnswer = "MISSING_ANSWER" + case missingRequiredFields = "MISSING_REQUIRED_FIELDS" + public var description: String { return self.rawValue } + } + public enum Role: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case admin = "ADMIN" case adminPro = "ADMIN_PRO" @@ -1515,6 +1591,33 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum TopicIRFilterFunction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case contains = "CONTAINS" + case containsString = "CONTAINS_STRING" + case endsWith = "ENDS_WITH" + case exact = "EXACT" + case last = "LAST" + case next = "NEXT" + case now = "NOW" + case previous = "PREVIOUS" + case startsWith = "STARTS_WITH" + case this = "THIS" + public var description: String { return self.rawValue } + } + + public enum TopicIRFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case acceptAllFilter = "ACCEPT_ALL_FILTER" + case categoryFilter = "CATEGORY_FILTER" + case dateRangeFilter = "DATE_RANGE_FILTER" + case equals = "EQUALS" + case numericEqualityFilter = "NUMERIC_EQUALITY_FILTER" + case numericRangeFilter = "NUMERIC_RANGE_FILTER" + case rankLimitFilter = "RANK_LIMIT_FILTER" + case relativeDateFilter = "RELATIVE_DATE_FILTER" + case topBottomFilter = "TOP_BOTTOM_FILTER" + public var description: String { return self.rawValue } + } + public enum TopicNumericSeparatorSymbol: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case comma = "COMMA" case dot = "DOT" @@ -1547,6 +1650,12 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum TopicSortDirection: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case ascending = "ASCENDING" + case descending = "DESCENDING" + public var description: String { return self.rawValue } + } + public enum TopicTimeGranularity: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case day = "DAY" case hour = "HOUR" @@ -1642,6 +1751,15 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum VisualRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case complimentary = "COMPLIMENTARY" + case fallback = "FALLBACK" + case fragment = "FRAGMENT" + case multiIntent = 
"MULTI_INTENT" + case primary = "PRIMARY" + public var description: String { return self.rawValue } + } + public enum WidgetStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -2253,6 +2371,40 @@ extension QuickSight { } } + public struct AggFunction: AWSEncodableShape & AWSDecodableShape { + /// The aggregation of an Agg function. + public let aggregation: AggType? + /// The aggregation parameters for an Agg function. + public let aggregationFunctionParameters: [String: String]? + /// The period of an Agg function. + public let period: TopicTimeGranularity? + /// The period field for an Agg function. + public let periodField: String? + + public init(aggregation: AggType? = nil, aggregationFunctionParameters: [String: String]? = nil, period: TopicTimeGranularity? = nil, periodField: String? = nil) { + self.aggregation = aggregation + self.aggregationFunctionParameters = aggregationFunctionParameters + self.period = period + self.periodField = periodField + } + + public func validate(name: String) throws { + try self.aggregationFunctionParameters?.forEach { + try validate($0.key, name: "aggregationFunctionParameters.key", parent: name, max: 256) + try validate($0.key, name: "aggregationFunctionParameters.key", parent: name, min: 1) + try validate($0.value, name: "aggregationFunctionParameters[\"\($0.key)\"]", parent: name, max: 1024) + } + try self.validate(self.periodField, name: "periodField", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case aggregation = "Aggregation" + case aggregationFunctionParameters = "AggregationFunctionParameters" + case period = "Period" + case periodField = "PeriodField" + } + } + public struct AggregationFunction: AWSEncodableShape & AWSDecodableShape { /// Aggregation for attributes. public let attributeAggregationFunction: AttributeAggregationFunction? @@ -2282,6 +2434,27 @@ extension QuickSight { } } + public struct AggregationPartitionBy: AWSEncodableShape & AWSDecodableShape { + /// The field Name for an AggregationPartitionBy. + public let fieldName: String? + /// The TimeGranularity for an AggregationPartitionBy. + public let timeGranularity: TimeGranularity? + + public init(fieldName: String? = nil, timeGranularity: TimeGranularity? = nil) { + self.fieldName = fieldName + self.timeGranularity = timeGranularity + } + + public func validate(name: String) throws { + try self.validate(self.fieldName, name: "fieldName", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case fieldName = "FieldName" + case timeGranularity = "TimeGranularity" + } + } + public struct AggregationSortConfiguration: AWSEncodableShape & AWSDecodableShape { /// The function that aggregates the values in Column. public let aggregationFunction: AggregationFunction? @@ -2594,6 +2767,27 @@ extension QuickSight { } } + public struct Anchor: AWSEncodableShape & AWSDecodableShape { + /// The AnchorType for the Anchor. + public let anchorType: AnchorType? + /// The offset of the Anchor. + public let offset: Int? + /// The TimeGranularity of the Anchor. + public let timeGranularity: TimeGranularity? + + public init(anchorType: AnchorType? = nil, offset: Int? = nil, timeGranularity: TimeGranularity? 
= nil) { + self.anchorType = anchorType + self.offset = offset + self.timeGranularity = timeGranularity + } + + private enum CodingKeys: String, CodingKey { + case anchorType = "AnchorType" + case offset = "Offset" + case timeGranularity = "TimeGranularity" + } + } + public struct AnchorDateConfiguration: AWSEncodableShape & AWSDecodableShape { /// The options for the date configuration. Choose one of the options below: NOW public let anchorOption: AnchorOption? @@ -4707,6 +4901,170 @@ extension QuickSight { } } + public struct BatchCreateTopicReviewedAnswerRequest: AWSEncodableShape { + /// The definition of the Answers to be created. + public let answers: [CreateTopicReviewedAnswer] + /// The ID of the Amazon Web Services account that you want to create a reviewed answer in. + public let awsAccountId: String + /// The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String + + public init(answers: [CreateTopicReviewedAnswer], awsAccountId: String, topicId: String) { + self.answers = answers + self.awsAccountId = awsAccountId + self.topicId = topicId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.answers, forKey: .answers) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + request.encodePath(self.topicId, key: "TopicId") + } + + public func validate(name: String) throws { + try self.answers.forEach { + try $0.validate(name: "\(name).answers[]") + } + try self.validate(self.answers, name: "answers", parent: name, max: 100) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + try self.validate(self.topicId, name: "topicId", parent: name, max: 256) + try self.validate(self.topicId, name: "topicId", parent: name, pattern: "^[A-Za-z0-9-_.\\\\+]*$") + } + + private enum CodingKeys: String, CodingKey { + case answers = "Answers" + } + } + + public struct BatchCreateTopicReviewedAnswerResponse: AWSDecodableShape { + /// The definition of Answers that are invalid and not created. + public let invalidAnswers: [InvalidTopicReviewedAnswer]? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + /// The definition of Answers that are successfully created. + public let succeededAnswers: [SucceededTopicReviewedAnswer]? + /// The Amazon Resource Name (ARN) of the topic. + public let topicArn: String? + /// The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String? + + public init(invalidAnswers: [InvalidTopicReviewedAnswer]? = nil, requestId: String? = nil, status: Int? = nil, succeededAnswers: [SucceededTopicReviewedAnswer]? = nil, topicArn: String? = nil, topicId: String? = nil) { + self.invalidAnswers = invalidAnswers + self.requestId = requestId + self.status = status + self.succeededAnswers = succeededAnswers + self.topicArn = topicArn + self.topicId = topicId + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! 
ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.invalidAnswers = try container.decodeIfPresent([InvalidTopicReviewedAnswer].self, forKey: .invalidAnswers) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + self.succeededAnswers = try container.decodeIfPresent([SucceededTopicReviewedAnswer].self, forKey: .succeededAnswers) + self.topicArn = try container.decodeIfPresent(String.self, forKey: .topicArn) + self.topicId = try container.decodeIfPresent(String.self, forKey: .topicId) + } + + private enum CodingKeys: String, CodingKey { + case invalidAnswers = "InvalidAnswers" + case requestId = "RequestId" + case succeededAnswers = "SucceededAnswers" + case topicArn = "TopicArn" + case topicId = "TopicId" + } + } + + public struct BatchDeleteTopicReviewedAnswerRequest: AWSEncodableShape { + /// The Answer IDs of the Answers to be deleted. + public let answerIds: [String]? + /// The ID of the Amazon Web Services account that you want to delete reviewed answers in. + public let awsAccountId: String + /// The ID for the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String + + public init(answerIds: [String]? = nil, awsAccountId: String, topicId: String) { + self.answerIds = answerIds + self.awsAccountId = awsAccountId + self.topicId = topicId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.answerIds, forKey: .answerIds) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + request.encodePath(self.topicId, key: "TopicId") + } + + public func validate(name: String) throws { + try self.answerIds?.forEach { + try validate($0, name: "answerIds[]", parent: name, max: 256) + try validate($0, name: "answerIds[]", parent: name, pattern: "^[A-Za-z0-9-_.\\\\+]*$") + } + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + try self.validate(self.topicId, name: "topicId", parent: name, max: 256) + try self.validate(self.topicId, name: "topicId", parent: name, pattern: "^[A-Za-z0-9-_.\\\\+]*$") + } + + private enum CodingKeys: String, CodingKey { + case answerIds = "AnswerIds" + } + } + + public struct BatchDeleteTopicReviewedAnswerResponse: AWSDecodableShape { + /// The definition of Answers that are invalid and not deleted. + public let invalidAnswers: [InvalidTopicReviewedAnswer]? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + /// The definition of Answers that are successfully deleted. + public let succeededAnswers: [SucceededTopicReviewedAnswer]? + /// The Amazon Resource Name (ARN) of the topic. + public let topicArn: String? + /// The ID of the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String? + + public init(invalidAnswers: [InvalidTopicReviewedAnswer]? = nil, requestId: String? = nil, status: Int?
= nil, succeededAnswers: [SucceededTopicReviewedAnswer]? = nil, topicArn: String? = nil, topicId: String? = nil) { + self.invalidAnswers = invalidAnswers + self.requestId = requestId + self.status = status + self.succeededAnswers = succeededAnswers + self.topicArn = topicArn + self.topicId = topicId + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.invalidAnswers = try container.decodeIfPresent([InvalidTopicReviewedAnswer].self, forKey: .invalidAnswers) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + self.succeededAnswers = try container.decodeIfPresent([SucceededTopicReviewedAnswer].self, forKey: .succeededAnswers) + self.topicArn = try container.decodeIfPresent(String.self, forKey: .topicArn) + self.topicId = try container.decodeIfPresent(String.self, forKey: .topicId) + } + + private enum CodingKeys: String, CodingKey { + case invalidAnswers = "InvalidAnswers" + case requestId = "RequestId" + case succeededAnswers = "SucceededAnswers" + case topicArn = "TopicArn" + case topicId = "TopicId" + } + } + public struct BigQueryParameters: AWSEncodableShape & AWSDecodableShape { /// The storage location where you create a Google BigQuery data source. public let dataSetRegion: String? @@ -5700,6 +6058,27 @@ extension QuickSight { } } + public struct CollectiveConstantEntry: AWSEncodableShape & AWSDecodableShape { + /// The ConstantType of a CollectiveConstantEntry. + public let constantType: ConstantType? + /// The value of a CollectiveConstantEntry. + public let value: String? + + public init(constantType: ConstantType? = nil, value: String? = nil) { + self.constantType = constantType + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.value, name: "value", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case constantType = "ConstantType" + case value = "Value" + } + } + public struct ColorScale: AWSEncodableShape & AWSDecodableShape { /// Determines the color fill type. public let colorFillType: ColorFillType @@ -6616,6 +6995,45 @@ extension QuickSight { } } + public struct ContributionAnalysisFactor: AWSEncodableShape & AWSDecodableShape { + /// The field name of the ContributionAnalysisFactor. + public let fieldName: String? + + public init(fieldName: String? = nil) { + self.fieldName = fieldName + } + + public func validate(name: String) throws { + try self.validate(self.fieldName, name: "fieldName", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case fieldName = "FieldName" + } + } + + public struct ContributionAnalysisTimeRanges: AWSEncodableShape & AWSDecodableShape { + /// The end range for the ContributionAnalysisTimeRanges. + public let endRange: TopicIRFilterOption? + /// The start range for the ContributionAnalysisTimeRanges. + public let startRange: TopicIRFilterOption? + + public init(endRange: TopicIRFilterOption? = nil, startRange: TopicIRFilterOption? 
= nil) { + self.endRange = endRange + self.startRange = startRange + } + + public func validate(name: String) throws { + try self.endRange?.validate(name: "\(name).endRange") + try self.startRange?.validate(name: "\(name).startRange") + } + + private enum CodingKeys: String, CodingKey { + case endRange = "EndRange" + case startRange = "StartRange" + } + } + public struct CreateAccountCustomizationRequest: AWSEncodableShape { /// The Amazon QuickSight customizations you're adding in the current Amazon Web Services Region. You can add these to an Amazon Web Services account and a QuickSight namespace. For example, you can add a default theme by setting AccountCustomization to the midnight theme: "AccountCustomization": { "DefaultTheme": "arn:aws:quicksight::aws:theme/MIDNIGHT" }. Or, you can add a custom theme by specifying "AccountCustomization": { "DefaultTheme": "arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639" }. public let accountCustomization: AccountCustomization @@ -8803,6 +9221,48 @@ extension QuickSight { } } + public struct CreateTopicReviewedAnswer: AWSEncodableShape { + /// The answer ID for the CreateTopicReviewedAnswer. + public let answerId: String + /// The Dataset arn for the CreateTopicReviewedAnswer. + public let datasetArn: String + /// The Mir for the CreateTopicReviewedAnswer. + public let mir: TopicIR? + /// The PrimaryVisual for the CreateTopicReviewedAnswer. + public let primaryVisual: TopicVisual? + /// The Question to be created. + public let question: String + /// The template for the CreateTopicReviewedAnswer. + public let template: TopicTemplate? + + public init(answerId: String, datasetArn: String, mir: TopicIR? = nil, primaryVisual: TopicVisual? = nil, question: String, template: TopicTemplate? = nil) { + self.answerId = answerId + self.datasetArn = datasetArn + self.mir = mir + self.primaryVisual = primaryVisual + self.question = question + self.template = template + } + + public func validate(name: String) throws { + try self.validate(self.answerId, name: "answerId", parent: name, max: 256) + try self.validate(self.answerId, name: "answerId", parent: name, pattern: "^[A-Za-z0-9-_.\\\\+]*$") + try self.mir?.validate(name: "\(name).mir") + try self.primaryVisual?.validate(name: "\(name).primaryVisual") + try self.validate(self.question, name: "question", parent: name, max: 256) + try self.template?.validate(name: "\(name).template") + } + + private enum CodingKeys: String, CodingKey { + case answerId = "AnswerId" + case datasetArn = "DatasetArn" + case mir = "Mir" + case primaryVisual = "PrimaryVisual" + case question = "Question" + case template = "Template" + } + } + public struct CreateVPCConnectionRequest: AWSEncodableShape { /// The Amazon Web Services account ID of the account where you want to create a new VPC /// connection. @@ -11023,15 +11483,21 @@ extension QuickSight { } public struct DateTimePickerControlDisplayOptions: AWSEncodableShape & AWSDecodableShape { + /// The date icon visibility of the DateTimePickerControlDisplayOptions. + public let dateIconVisibility: Visibility? /// Customize how dates are formatted in controls. public let dateTimeFormat: String? + /// The helper text visibility of the DateTimePickerControlDisplayOptions. + public let helperTextVisibility: Visibility? /// The configuration of info icon label options. public let infoIconLabelOptions: SheetControlInfoIconLabelOptions? /// The options to configure the title visibility, name, and font size. public let titleOptions: LabelOptions? 
- public init(dateTimeFormat: String? = nil, infoIconLabelOptions: SheetControlInfoIconLabelOptions? = nil, titleOptions: LabelOptions? = nil) { + public init(dateIconVisibility: Visibility? = nil, dateTimeFormat: String? = nil, helperTextVisibility: Visibility? = nil, infoIconLabelOptions: SheetControlInfoIconLabelOptions? = nil, titleOptions: LabelOptions? = nil) { + self.dateIconVisibility = dateIconVisibility self.dateTimeFormat = dateTimeFormat + self.helperTextVisibility = helperTextVisibility self.infoIconLabelOptions = infoIconLabelOptions self.titleOptions = titleOptions } @@ -11044,7 +11510,9 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case dateIconVisibility = "DateIconVisibility" case dateTimeFormat = "DateTimeFormat" + case helperTextVisibility = "HelperTextVisibility" case infoIconLabelOptions = "InfoIconLabelOptions" case titleOptions = "TitleOptions" } @@ -17149,6 +17617,31 @@ extension QuickSight { } } + public struct FilterAggMetrics: AWSEncodableShape & AWSDecodableShape { + /// The function for the FilterAggMetrics. + public let function: AggType? + /// The metric operand of the FilterAggMetrics. + public let metricOperand: Identifier? + /// The sort direction for FilterAggMetrics. + public let sortDirection: TopicSortDirection? + + public init(function: AggType? = nil, metricOperand: Identifier? = nil, sortDirection: TopicSortDirection? = nil) { + self.function = function + self.metricOperand = metricOperand + self.sortDirection = sortDirection + } + + public func validate(name: String) throws { + try self.metricOperand?.validate(name: "\(name).metricOperand") + } + + private enum CodingKeys: String, CodingKey { + case function = "Function" + case metricOperand = "MetricOperand" + case sortDirection = "SortDirection" + } + } + public struct FilterControl: AWSEncodableShape & AWSDecodableShape { /// A control from a filter that is scoped across more than one sheet. This represents your filter control on a sheet public let crossSheet: FilterCrossSheetControl? @@ -20105,6 +20598,23 @@ extension QuickSight { } } + public struct Identifier: AWSEncodableShape & AWSDecodableShape { + /// The identity of the identifier. + public let identity: String + + public init(identity: String) { + self.identity = identity + } + + public func validate(name: String) throws { + try self.validate(self.identity, name: "identity", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case identity = "Identity" + } + } + public struct IdentityCenterConfiguration: AWSEncodableShape & AWSDecodableShape { /// A Boolean option that controls whether Trusted Identity Propagation should be used. public let enableIdentityPropagation: Bool? @@ -20458,6 +20968,23 @@ extension QuickSight { } } + public struct InvalidTopicReviewedAnswer: AWSDecodableShape { + /// The answer ID for the InvalidTopicReviewedAnswer. + public let answerId: String? + /// The error that is returned for the InvalidTopicReviewedAnswer. + public let error: ReviewedAnswerErrorCode? + + public init(answerId: String? = nil, error: ReviewedAnswerErrorCode? = nil) { + self.answerId = answerId + self.error = error + } + + private enum CodingKeys: String, CodingKey { + case answerId = "AnswerId" + case error = "Error" + } + } + public struct ItemsLimitConfiguration: AWSEncodableShape & AWSDecodableShape { /// The limit on how many items of a field are showed in the chart. For example, the number of slices that are displayed in a pie chart. public let itemsLimit: Int64? 
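// Illustrative usage sketch (not part of the generated service code), showing how the reviewed-answer
// operations introduced in this change might be called. It assumes `import SotoQuickSight`, an
// already-configured `QuickSight` service object, and placeholder account, topic, answer, and dataset
// identifiers; `listTopicReviewedAnswers`, `batchCreateTopicReviewedAnswer`, and the request and answer
// shapes are the ones added in this diff.
func reviewTopicAnswers(quickSight: QuickSight) async throws {
    // List the reviewed answers currently attached to a topic.
    let listed = try await quickSight.listTopicReviewedAnswers(
        ListTopicReviewedAnswersRequest(awsAccountId: "123456789012", topicId: "example-topic-id")
    )
    for answer in listed.answers ?? [] {
        print("\(answer.answerId): \(answer.question)")
    }

    // Submit a new reviewed answer; `mir`, `primaryVisual`, and `template` are optional and omitted here.
    let created = try await quickSight.batchCreateTopicReviewedAnswer(
        BatchCreateTopicReviewedAnswerRequest(
            answers: [
                CreateTopicReviewedAnswer(
                    answerId: "example-answer-id",
                    datasetArn: "arn:aws:quicksight:us-east-1:123456789012:dataset/example",
                    question: "What were total sales last quarter?"
                )
            ],
            awsAccountId: "123456789012",
            topicId: "example-topic-id"
        )
    )
    print("created: \(created.succeededAnswers?.count ?? 0), invalid: \(created.invalidAnswers?.count ?? 0)")
}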
@@ -23301,6 +23828,73 @@ extension QuickSight { } } + public struct ListTopicReviewedAnswersRequest: AWSEncodableShape { + /// The ID of the Amazon Web Services account that contains the reviewed answers that you want listed. + public let awsAccountId: String + /// The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String + + public init(awsAccountId: String, topicId: String) { + self.awsAccountId = awsAccountId + self.topicId = topicId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + request.encodePath(self.topicId, key: "TopicId") + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + try self.validate(self.topicId, name: "topicId", parent: name, max: 256) + try self.validate(self.topicId, name: "topicId", parent: name, pattern: "^[A-Za-z0-9-_.\\\\+]*$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTopicReviewedAnswersResponse: AWSDecodableShape { + /// The definition of all Answers in the topic. + public let answers: [TopicReviewedAnswer]? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + /// The Amazon Resource Name (ARN) of the topic. + public let topicArn: String? + /// The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + public let topicId: String? + + public init(answers: [TopicReviewedAnswer]? = nil, requestId: String? = nil, status: Int? = nil, topicArn: String? = nil, topicId: String? = nil) { + self.answers = answers + self.requestId = requestId + self.status = status + self.topicArn = topicArn + self.topicId = topicId + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.answers = try container.decodeIfPresent([TopicReviewedAnswer].self, forKey: .answers) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + self.topicArn = try container.decodeIfPresent(String.self, forKey: .topicArn) + self.topicId = try container.decodeIfPresent(String.self, forKey: .topicId) + } + + private enum CodingKeys: String, CodingKey { + case answers = "Answers" + case requestId = "RequestId" + case topicArn = "TopicArn" + case topicId = "TopicId" + } + } + public struct ListTopicsRequest: AWSEncodableShape { /// The ID of the Amazon Web Services account that contains the topics that you want to list. public let awsAccountId: String @@ -24073,6 +24667,23 @@ extension QuickSight { } } + public struct NamedEntityRef: AWSEncodableShape & AWSDecodableShape { + /// The NamedEntityName for the NamedEntityRef. + public let namedEntityName: String? + + public init(namedEntityName: String?
= nil) { + self.namedEntityName = namedEntityName + } + + public func validate(name: String) throws { + try self.validate(self.namedEntityName, name: "namedEntityName", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case namedEntityName = "NamedEntityName" + } + } + public struct NamespaceError: AWSDecodableShape { /// The message for the error. public let message: String? @@ -29629,6 +30240,28 @@ extension QuickSight { } } + public struct Slot: AWSEncodableShape & AWSDecodableShape { + /// The slot ID of the slot. + public let slotId: String? + /// The visual ID for the slot. + public let visualId: String? + + public init(slotId: String? = nil, visualId: String? = nil) { + self.slotId = slotId + self.visualId = visualId + } + + public func validate(name: String) throws { + try self.validate(self.slotId, name: "slotId", parent: name, max: 256) + try self.validate(self.visualId, name: "visualId", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case slotId = "SlotId" + case visualId = "VisualId" + } + } + public struct SmallMultiplesAxisProperties: AWSEncodableShape & AWSDecodableShape { /// Defines the placement of the axis. By default, axes are rendered OUTSIDE of the panels. Axes with INDEPENDENT scale are rendered INSIDE the panels. public let placement: SmallMultiplesAxisPlacement? @@ -30666,6 +31299,19 @@ extension QuickSight { } } + public struct SucceededTopicReviewedAnswer: AWSDecodableShape { + /// The answer ID for the SucceededTopicReviewedAnswer. + public let answerId: String? + + public init(answerId: String? = nil) { + self.answerId = answerId + } + + private enum CodingKeys: String, CodingKey { + case answerId = "AnswerId" + } + } + public struct SuccessfulKeyRegistrationEntry: AWSDecodableShape { /// The ARN of the KMS key that is associated with the SuccessfulKeyRegistrationEntry entry. public let keyArn: String @@ -32804,6 +33450,45 @@ extension QuickSight { } } + public struct TopicConstantValue: AWSEncodableShape & AWSDecodableShape { + /// The constant type of a TopicConstantValue. + public let constantType: ConstantType? + /// The maximum for the TopicConstantValue. + public let maximum: String? + /// The minimum for the TopicConstantValue. + public let minimum: String? + /// The value of the TopicConstantValue. + public let value: String? + /// The value list of the TopicConstantValue. + public let valueList: [CollectiveConstantEntry]? + + public init(constantType: ConstantType? = nil, maximum: String? = nil, minimum: String? = nil, value: String? = nil, valueList: [CollectiveConstantEntry]? = nil) { + self.constantType = constantType + self.maximum = maximum + self.minimum = minimum + self.value = value + self.valueList = valueList + } + + public func validate(name: String) throws { + try self.validate(self.maximum, name: "maximum", parent: name, max: 1024) + try self.validate(self.minimum, name: "minimum", parent: name, max: 1024) + try self.validate(self.value, name: "value", parent: name, max: 1024) + try self.valueList?.forEach { + try $0.validate(name: "\(name).valueList[]") + } + try self.validate(self.valueList, name: "valueList", parent: name, max: 2000) + } + + private enum CodingKeys: String, CodingKey { + case constantType = "ConstantType" + case maximum = "Maximum" + case minimum = "Minimum" + case value = "Value" + case valueList = "ValueList" + } + } + public struct TopicDateRangeFilter: AWSEncodableShape & AWSDecodableShape { /// The constant used in a date range filter. 
public let constant: TopicRangeFilterConstant? @@ -32926,6 +33611,313 @@ extension QuickSight { } } + public struct TopicIR: AWSEncodableShape & AWSDecodableShape { + /// The contribution analysis for the TopicIR. + public let contributionAnalysis: TopicIRContributionAnalysis? + /// The filters for the TopicIR. + public let filters: [[TopicIRFilterOption]]? + /// The GroupBy list for the TopicIR. + public let groupByList: [TopicIRGroupBy]? + /// The metrics for the TopicIR. + public let metrics: [TopicIRMetric]? + /// The sort for the TopicIR. + public let sort: TopicSortClause? + /// The visual for the TopicIR. + public let visual: VisualOptions? + + public init(contributionAnalysis: TopicIRContributionAnalysis? = nil, filters: [[TopicIRFilterOption]]? = nil, groupByList: [TopicIRGroupBy]? = nil, metrics: [TopicIRMetric]? = nil, sort: TopicSortClause? = nil, visual: VisualOptions? = nil) { + self.contributionAnalysis = contributionAnalysis + self.filters = filters + self.groupByList = groupByList + self.metrics = metrics + self.sort = sort + self.visual = visual + } + + public func validate(name: String) throws { + try self.contributionAnalysis?.validate(name: "\(name).contributionAnalysis") + try self.filters?.forEach { + try validate($0, name: "filters[]", parent: name, max: 2000) + } + try self.validate(self.filters, name: "filters", parent: name, max: 2000) + try self.groupByList?.forEach { + try $0.validate(name: "\(name).groupByList[]") + } + try self.validate(self.groupByList, name: "groupByList", parent: name, max: 2000) + try self.metrics?.forEach { + try $0.validate(name: "\(name).metrics[]") + } + try self.validate(self.metrics, name: "metrics", parent: name, max: 2000) + try self.sort?.validate(name: "\(name).sort") + try self.visual?.validate(name: "\(name).visual") + } + + private enum CodingKeys: String, CodingKey { + case contributionAnalysis = "ContributionAnalysis" + case filters = "Filters" + case groupByList = "GroupByList" + case metrics = "Metrics" + case sort = "Sort" + case visual = "Visual" + } + } + + public struct TopicIRComparisonMethod: AWSEncodableShape & AWSDecodableShape { + /// The period for the TopicIRComparisonMethod. + public let period: TopicTimeGranularity? + /// The type for the TopicIRComparisonMethod. + public let type: ComparisonMethodType? + /// The window size for the TopicIRComparisonMethod. + public let windowSize: Int? + + public init(period: TopicTimeGranularity? = nil, type: ComparisonMethodType? = nil, windowSize: Int? = nil) { + self.period = period + self.type = type + self.windowSize = windowSize + } + + private enum CodingKeys: String, CodingKey { + case period = "Period" + case type = "Type" + case windowSize = "WindowSize" + } + } + + public struct TopicIRContributionAnalysis: AWSEncodableShape & AWSDecodableShape { + /// The direction for the TopicIRContributionAnalysis. + public let direction: ContributionAnalysisDirection? + /// The factors for a TopicIRContributionAnalysis. + public let factors: [ContributionAnalysisFactor]? + /// The sort type for the TopicIRContributionAnalysis. + public let sortType: ContributionAnalysisSortType? + /// The time ranges for the TopicIRContributionAnalysis. + public let timeRanges: ContributionAnalysisTimeRanges? + + public init(direction: ContributionAnalysisDirection? = nil, factors: [ContributionAnalysisFactor]? = nil, sortType: ContributionAnalysisSortType? = nil, timeRanges: ContributionAnalysisTimeRanges? 
= nil) { + self.direction = direction + self.factors = factors + self.sortType = sortType + self.timeRanges = timeRanges + } + + public func validate(name: String) throws { + try self.factors?.forEach { + try $0.validate(name: "\(name).factors[]") + } + try self.validate(self.factors, name: "factors", parent: name, max: 50) + try self.timeRanges?.validate(name: "\(name).timeRanges") + } + + private enum CodingKeys: String, CodingKey { + case direction = "Direction" + case factors = "Factors" + case sortType = "SortType" + case timeRanges = "TimeRanges" + } + } + + public struct TopicIRFilterOption: AWSEncodableShape & AWSDecodableShape { + /// The agg metrics for the TopicIRFilterOption. + public let aggMetrics: [FilterAggMetrics]? + /// The aggregation for the TopicIRFilterOption. + public let aggregation: AggType? + /// The aggregation function parameters for the TopicIRFilterOption. + public let aggregationFunctionParameters: [String: String]? + /// The AggregationPartitionBy for the TopicIRFilterOption. + public let aggregationPartitionBy: [AggregationPartitionBy]? + /// The anchor for the TopicIRFilterOption. + public let anchor: Anchor? + /// The constant for the TopicIRFilterOption. + public let constant: TopicConstantValue? + /// The filter class for the TopicIRFilterOption. + public let filterClass: FilterClass? + /// The filter type for the TopicIRFilterOption. + public let filterType: TopicIRFilterType? + /// The function for the TopicIRFilterOption. + public let function: TopicIRFilterFunction? + /// The inclusive for the TopicIRFilterOption. + public let inclusive: Bool? + /// The inverse for the TopicIRFilterOption. + public let inverse: Bool? + /// The last next offset for the TopicIRFilterOption. + public let lastNextOffset: TopicConstantValue? + /// The null filter for the TopicIRFilterOption. + public let nullFilter: NullFilterOption? + /// The operand field for the TopicIRFilterOption. + public let operandField: Identifier? + /// The range for the TopicIRFilterOption. + public let range: TopicConstantValue? + /// The sort direction for the TopicIRFilterOption. + public let sortDirection: TopicSortDirection? + /// The time granularity for the TopicIRFilterOption. + public let timeGranularity: TimeGranularity? + /// The TopBottomLimit for the TopicIRFilterOption. + public let topBottomLimit: TopicConstantValue? + + public init(aggMetrics: [FilterAggMetrics]? = nil, aggregation: AggType? = nil, aggregationFunctionParameters: [String: String]? = nil, aggregationPartitionBy: [AggregationPartitionBy]? = nil, anchor: Anchor? = nil, constant: TopicConstantValue? = nil, filterClass: FilterClass? = nil, filterType: TopicIRFilterType? = nil, function: TopicIRFilterFunction? = nil, inclusive: Bool? = nil, inverse: Bool? = nil, lastNextOffset: TopicConstantValue? = nil, nullFilter: NullFilterOption? = nil, operandField: Identifier? = nil, range: TopicConstantValue? = nil, sortDirection: TopicSortDirection? = nil, timeGranularity: TimeGranularity? = nil, topBottomLimit: TopicConstantValue? 
= nil) { + self.aggMetrics = aggMetrics + self.aggregation = aggregation + self.aggregationFunctionParameters = aggregationFunctionParameters + self.aggregationPartitionBy = aggregationPartitionBy + self.anchor = anchor + self.constant = constant + self.filterClass = filterClass + self.filterType = filterType + self.function = function + self.inclusive = inclusive + self.inverse = inverse + self.lastNextOffset = lastNextOffset + self.nullFilter = nullFilter + self.operandField = operandField + self.range = range + self.sortDirection = sortDirection + self.timeGranularity = timeGranularity + self.topBottomLimit = topBottomLimit + } + + public func validate(name: String) throws { + try self.aggMetrics?.forEach { + try $0.validate(name: "\(name).aggMetrics[]") + } + try self.validate(self.aggMetrics, name: "aggMetrics", parent: name, max: 100) + try self.aggregationFunctionParameters?.forEach { + try validate($0.key, name: "aggregationFunctionParameters.key", parent: name, max: 256) + try validate($0.key, name: "aggregationFunctionParameters.key", parent: name, min: 1) + try validate($0.value, name: "aggregationFunctionParameters[\"\($0.key)\"]", parent: name, max: 1024) + } + try self.aggregationPartitionBy?.forEach { + try $0.validate(name: "\(name).aggregationPartitionBy[]") + } + try self.validate(self.aggregationPartitionBy, name: "aggregationPartitionBy", parent: name, max: 50) + try self.constant?.validate(name: "\(name).constant") + try self.lastNextOffset?.validate(name: "\(name).lastNextOffset") + try self.operandField?.validate(name: "\(name).operandField") + try self.range?.validate(name: "\(name).range") + try self.topBottomLimit?.validate(name: "\(name).topBottomLimit") + } + + private enum CodingKeys: String, CodingKey { + case aggMetrics = "AggMetrics" + case aggregation = "Aggregation" + case aggregationFunctionParameters = "AggregationFunctionParameters" + case aggregationPartitionBy = "AggregationPartitionBy" + case anchor = "Anchor" + case constant = "Constant" + case filterClass = "FilterClass" + case filterType = "FilterType" + case function = "Function" + case inclusive = "Inclusive" + case inverse = "Inverse" + case lastNextOffset = "LastNextOffset" + case nullFilter = "NullFilter" + case operandField = "OperandField" + case range = "Range" + case sortDirection = "SortDirection" + case timeGranularity = "TimeGranularity" + case topBottomLimit = "TopBottomLimit" + } + } + + public struct TopicIRGroupBy: AWSEncodableShape & AWSDecodableShape { + /// The display format for the TopicIRGroupBy. + public let displayFormat: DisplayFormat? + public let displayFormatOptions: DisplayFormatOptions? + /// The field name for the TopicIRGroupBy. + public let fieldName: Identifier? + /// The named entity for the TopicIRGroupBy. + public let namedEntity: NamedEntityRef? + /// The sort for the TopicIRGroupBy. + public let sort: TopicSortClause? + /// The time granularity for the TopicIRGroupBy. + public let timeGranularity: TopicTimeGranularity? + + public init(displayFormat: DisplayFormat? = nil, displayFormatOptions: DisplayFormatOptions? = nil, fieldName: Identifier? = nil, namedEntity: NamedEntityRef? = nil, sort: TopicSortClause? = nil, timeGranularity: TopicTimeGranularity? 
= nil) { + self.displayFormat = displayFormat + self.displayFormatOptions = displayFormatOptions + self.fieldName = fieldName + self.namedEntity = namedEntity + self.sort = sort + self.timeGranularity = timeGranularity + } + + public func validate(name: String) throws { + try self.displayFormatOptions?.validate(name: "\(name).displayFormatOptions") + try self.fieldName?.validate(name: "\(name).fieldName") + try self.namedEntity?.validate(name: "\(name).namedEntity") + try self.sort?.validate(name: "\(name).sort") + } + + private enum CodingKeys: String, CodingKey { + case displayFormat = "DisplayFormat" + case displayFormatOptions = "DisplayFormatOptions" + case fieldName = "FieldName" + case namedEntity = "NamedEntity" + case sort = "Sort" + case timeGranularity = "TimeGranularity" + } + } + + public struct TopicIRMetric: AWSEncodableShape & AWSDecodableShape { + /// The calculated field references for the TopicIRMetric. + public let calculatedFieldReferences: [Identifier]? + /// The comparison method for the TopicIRMetric. + public let comparisonMethod: TopicIRComparisonMethod? + /// The display format for the TopicIRMetric. + public let displayFormat: DisplayFormat? + public let displayFormatOptions: DisplayFormatOptions? + /// The expression for the TopicIRMetric. + public let expression: String? + /// The function for the TopicIRMetric. + public let function: AggFunction? + /// The metric ID for the TopicIRMetric. + public let metricId: Identifier? + /// The named entity for the TopicIRMetric. + public let namedEntity: NamedEntityRef? + /// The operands for the TopicIRMetric. + public let operands: [Identifier]? + + public init(calculatedFieldReferences: [Identifier]? = nil, comparisonMethod: TopicIRComparisonMethod? = nil, displayFormat: DisplayFormat? = nil, displayFormatOptions: DisplayFormatOptions? = nil, expression: String? = nil, function: AggFunction? = nil, metricId: Identifier? = nil, namedEntity: NamedEntityRef? = nil, operands: [Identifier]? 
= nil) { + self.calculatedFieldReferences = calculatedFieldReferences + self.comparisonMethod = comparisonMethod + self.displayFormat = displayFormat + self.displayFormatOptions = displayFormatOptions + self.expression = expression + self.function = function + self.metricId = metricId + self.namedEntity = namedEntity + self.operands = operands + } + + public func validate(name: String) throws { + try self.calculatedFieldReferences?.forEach { + try $0.validate(name: "\(name).calculatedFieldReferences[]") + } + try self.validate(self.calculatedFieldReferences, name: "calculatedFieldReferences", parent: name, max: 250) + try self.displayFormatOptions?.validate(name: "\(name).displayFormatOptions") + try self.validate(self.expression, name: "expression", parent: name, max: 4096) + try self.validate(self.expression, name: "expression", parent: name, min: 1) + try self.function?.validate(name: "\(name).function") + try self.metricId?.validate(name: "\(name).metricId") + try self.namedEntity?.validate(name: "\(name).namedEntity") + try self.operands?.forEach { + try $0.validate(name: "\(name).operands[]") + } + try self.validate(self.operands, name: "operands", parent: name, max: 25) + } + + private enum CodingKeys: String, CodingKey { + case calculatedFieldReferences = "CalculatedFieldReferences" + case comparisonMethod = "ComparisonMethod" + case displayFormat = "DisplayFormat" + case displayFormatOptions = "DisplayFormatOptions" + case expression = "Expression" + case function = "Function" + case metricId = "MetricId" + case namedEntity = "NamedEntity" + case operands = "Operands" + } + } + public struct TopicNamedEntity: AWSEncodableShape & AWSDecodableShape { /// The definition of a named entity. public let definition: [NamedEntityDefinition]? @@ -33143,6 +34135,43 @@ extension QuickSight { } } + public struct TopicReviewedAnswer: AWSDecodableShape { + /// The answer ID of the reviewed answer. + public let answerId: String + /// The Amazon Resource Name (ARN) of the reviewed answer. + public let arn: String? + /// The Dataset ARN for the TopicReviewedAnswer. + public let datasetArn: String + /// The mir for the TopicReviewedAnswer. + public let mir: TopicIR? + /// The primary visual for the TopicReviewedAnswer. + public let primaryVisual: TopicVisual? + /// The question for the TopicReviewedAnswer. + public let question: String + /// The template for the TopicReviewedAnswer. + public let template: TopicTemplate? + + public init(answerId: String, arn: String? = nil, datasetArn: String, mir: TopicIR? = nil, primaryVisual: TopicVisual? = nil, question: String, template: TopicTemplate? = nil) { + self.answerId = answerId + self.arn = arn + self.datasetArn = datasetArn + self.mir = mir + self.primaryVisual = primaryVisual + self.question = question + self.template = template + } + + private enum CodingKeys: String, CodingKey { + case answerId = "AnswerId" + case arn = "Arn" + case datasetArn = "DatasetArn" + case mir = "Mir" + case primaryVisual = "PrimaryVisual" + case question = "Question" + case template = "Template" + } + } + public struct TopicSingularFilterConstant: AWSEncodableShape & AWSDecodableShape { /// The type of the singular filter constant. Valid values for this structure are SINGULAR. public let constantType: ConstantType? @@ -33164,6 +34193,27 @@ extension QuickSight { } } + public struct TopicSortClause: AWSEncodableShape & AWSDecodableShape { + /// The operand for a TopicSortClause. + public let operand: Identifier? + /// The sort direction for the TopicSortClause. 
+ public let sortDirection: TopicSortDirection? + + public init(operand: Identifier? = nil, sortDirection: TopicSortDirection? = nil) { + self.operand = operand + self.sortDirection = sortDirection + } + + public func validate(name: String) throws { + try self.operand?.validate(name: "\(name).operand") + } + + private enum CodingKeys: String, CodingKey { + case operand = "Operand" + case sortDirection = "SortDirection" + } + } + public struct TopicSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the topic. public let arn: String? @@ -33189,6 +34239,63 @@ extension QuickSight { } } + public struct TopicTemplate: AWSEncodableShape & AWSDecodableShape { + /// The slots for the TopicTemplate. + public let slots: [Slot]? + /// The template type for the TopicTemplate. + public let templateType: String? + + public init(slots: [Slot]? = nil, templateType: String? = nil) { + self.slots = slots + self.templateType = templateType + } + + public func validate(name: String) throws { + try self.slots?.forEach { + try $0.validate(name: "\(name).slots[]") + } + try self.validate(self.templateType, name: "templateType", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case slots = "Slots" + case templateType = "TemplateType" + } + } + + public struct TopicVisual: AWSEncodableShape & AWSDecodableShape { + /// The ir for the TopicVisual. + public let ir: TopicIR? + /// The role for the TopicVisual. + public let role: VisualRole? + /// The supporting visuals for the TopicVisual. + public let supportingVisuals: [TopicVisual]? + /// The visual ID for the TopicVisual. + public let visualId: String? + + public init(ir: TopicIR? = nil, role: VisualRole? = nil, supportingVisuals: [TopicVisual]? = nil, visualId: String? = nil) { + self.ir = ir + self.role = role + self.supportingVisuals = supportingVisuals + self.visualId = visualId + } + + public func validate(name: String) throws { + try self.ir?.validate(name: "\(name).ir") + try self.supportingVisuals?.forEach { + try $0.validate(name: "\(name).supportingVisuals[]") + } + try self.validate(self.visualId, name: "visualId", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case ir = "Ir" + case role = "Role" + case supportingVisuals = "SupportingVisuals" + case visualId = "VisualId" + } + } + public struct TotalAggregationComputation: AWSEncodableShape & AWSDecodableShape { /// The ID for a computation. public let computationId: String @@ -37158,6 +38265,23 @@ extension QuickSight { } } + public struct VisualOptions: AWSEncodableShape & AWSDecodableShape { + /// The type for a VisualOptions. + public let type: String? + + public init(type: String? = nil) { + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.type, name: "type", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + } + } + public struct VisualPalette: AWSEncodableShape & AWSDecodableShape { /// The chart color options for the visual palette. public let chartColor: String? diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift index 0caaa7ab91..c49e7df95a 100644 --- a/Sources/Soto/Services/RDS/RDS_api.swift +++ b/Sources/Soto/Services/RDS/RDS_api.swift @@ -1313,7 +1313,7 @@ public struct RDS: AWSService { ) } - /// Returns a list of resources (for example, DB instances) that have at least one pending maintenance action. 
+ /// Returns a list of resources (for example, DB instances) that have at least one pending maintenance action. This API follows an eventual consistency model. This means that the result of the DescribePendingMaintenanceActions command might not be immediately visible to all subsequent RDS commands. Keep this in mind when you use DescribePendingMaintenanceActions immediately after using a previous API command such as ApplyPendingMaintenanceActions. @Sendable public func describePendingMaintenanceActions(_ input: DescribePendingMaintenanceActionsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> PendingMaintenanceActionsMessage { return try await self.client.execute( @@ -2838,7 +2838,7 @@ extension RDS { ) } - /// Returns a list of resources (for example, DB instances) that have at least one pending maintenance action. + /// Returns a list of resources (for example, DB instances) that have at least one pending maintenance action. This API follows an eventual consistency model. This means that the result of the DescribePendingMaintenanceActions command might not be immediately visible to all subsequent RDS commands. Keep this in mind when you use DescribePendingMaintenanceActions immediately after using a previous API command such as ApplyPendingMaintenanceActions. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index adae8357f4..1a5a033cd3 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -1354,7 +1354,7 @@ extension RDS { public let preferredMaintenanceWindow: String? /// When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster. The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy. The presigned URL request must contain the following parameter values: KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will be created in. ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1. To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process. 
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters only public let preSignedUrl: String? - /// Specifies whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. + /// Specifies whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. public let publiclyAccessible: Bool? /// Reserved for future use. public let rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? @@ -1695,7 +1695,7 @@ extension RDS { public var processorFeatures: [ProcessorFeature]? 
/// The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 public let promotionTier: Int? - /// Specifies whether the DB instance is publicly accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public. + /// Specifies whether the DB instance is publicly accessible. When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public. public let publiclyAccessible: Bool? /// Specifes whether the DB instance is encrypted. By default, it isn't encrypted. For RDS Custom DB instances, either enable this setting or leave it unset. Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB instances. The encryption for DB instances is managed by the DB cluster. public let storageEncrypted: Bool? 
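The reworded PubliclyAccessible documentation above boils down to one practical point: a DB instance or cluster exposes a single DNS endpoint, and whether that name resolves to a public or a private IP depends on where you resolve it from. A minimal Soto sketch of checking the flag and printing the endpoint follows; the client setup, region, and the "database-1" identifier are illustrative assumptions, not values taken from this change.

import SotoRDS

// Hypothetical usage sketch: inspect whether a DB instance is publicly accessible
// and print the single DNS endpoint described in the documentation above.
func printEndpoint() async throws {
    let awsClient = AWSClient()                        // assumed default client setup
    let rds = RDS(client: awsClient, region: .useast1) // region chosen for illustration

    let result = try await rds.describeDBInstances(.init(dbInstanceIdentifier: "database-1"))
    if let instance = result.dbInstances?.first {
        print("PubliclyAccessible:", instance.publiclyAccessible ?? false)
        print("Endpoint:", instance.endpoint?.address ?? "n/a", "port:", instance.endpoint?.port ?? 0)
    }
    try await awsClient.shutdown()
}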
@@ -2808,7 +2808,7 @@ extension RDS { public let preferredBackupWindow: String? /// The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). public let preferredMaintenanceWindow: String? - /// Indicates whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. For more information, see CreateDBCluster. This setting is only for non-Aurora Multi-AZ DB clusters. + /// Indicates whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. For more information, see CreateDBCluster. This setting is only for non-Aurora Multi-AZ DB clusters. public let publiclyAccessible: Bool? /// Reserved for future use. public let rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? @@ -3993,7 +3993,7 @@ extension RDS { public var processorFeatures: [ProcessorFeature]? /// The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. public let promotionTier: Int? - /// Indicates whether the DB instance is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. + /// Indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. 
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. public let publiclyAccessible: Bool? /// The identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output doesn't contain information about cross-Region Aurora read replicas. Currently, each RDS DB instance can have only one Aurora read replica. @OptionalCustomCoding> @@ -5452,7 +5452,7 @@ extension RDS { public struct DeleteDBClusterMessage: AWSEncodableShape { /// The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive. Constraints: Must match an existing DBClusterIdentifier. public let dbClusterIdentifier: String? - /// Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. + /// Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. You must delete automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing automated backups for RDS Multi-AZ DB clusters, see Managing automated backups. public let deleteAutomatedBackups: Bool? /// The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens public let finalDBSnapshotIdentifier: String? @@ -6103,7 +6103,7 @@ extension RDS { public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A specific source to return parameters for. Valid Values: customer engine service + /// A specific source to return parameters for. Valid Values: user engine service public let source: String? public init(dbClusterParameterGroupName: String? = nil, filters: [Filter]? = nil, marker: String? = nil, maxRecords: Int? = nil, source: String? = nil) { @@ -9129,7 +9129,7 @@ extension RDS { public var processorFeatures: [ProcessorFeature]? /// The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 public let promotionTier: Int? - /// Specifies whether the DB instance is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. 
Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter. + /// Specifies whether the DB instance is publicly accessible. When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter. public let publiclyAccessible: Bool? /// A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. public let replicaMode: ReplicaMode? 
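The eventual-consistency note added to DescribePendingMaintenanceActions earlier in this file's diff means that a maintenance action you have just applied may not show up immediately. A hedged polling sketch follows; the client setup, region, retry count, and sleep interval are illustrative assumptions rather than guidance from this change.

import SotoRDS

// Hypothetical polling sketch: after ApplyPendingMaintenanceAction, re-query until the
// pending action for the resource becomes visible, since the API is eventually consistent.
func waitForPendingActions(resourceId: String) async throws {
    let awsClient = AWSClient()                        // assumed default client setup
    let rds = RDS(client: awsClient, region: .useast1) // region chosen for illustration

    for _ in 0..<10 {
        let page = try await rds.describePendingMaintenanceActions(.init(resourceIdentifier: resourceId))
        if let resource = page.pendingMaintenanceActions?.first(where: { $0.resourceIdentifier == resourceId }) {
            for detail in resource.pendingMaintenanceActionDetails ?? [] {
                print(detail.action ?? "?", detail.description ?? "")
            }
            break
        }
        try await Task.sleep(nanoseconds: 5_000_000_000) // back off before re-checking
    }
    try await awsClient.shutdown()
}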
diff --git a/Sources/Soto/Services/Redshift/Redshift_api.swift b/Sources/Soto/Services/Redshift/Redshift_api.swift index 542a75cd36..db6cce87f4 100644 --- a/Sources/Soto/Services/Redshift/Redshift_api.swift +++ b/Sources/Soto/Services/Redshift/Redshift_api.swift @@ -74,7 +74,10 @@ public struct Redshift: AWSService { /// custom endpoints for regions static var serviceEndpoints: [String: String] {[ "us-gov-east-1": "redshift.us-gov-east-1.amazonaws.com", - "us-gov-west-1": "redshift.us-gov-west-1.amazonaws.com" + "us-gov-west-1": "redshift.us-gov-west-1.amazonaws.com", + "us-iso-east-1": "redshift.us-iso-east-1.c2s.ic.gov", + "us-iso-west-1": "redshift.us-iso-west-1.c2s.ic.gov", + "us-isob-east-1": "redshift.us-isob-east-1.sc2s.sgov.gov" ]} @@ -85,9 +88,6 @@ public struct Redshift: AWSService { "ca-west-1": "redshift-fips.ca-west-1.amazonaws.com", "us-east-1": "redshift-fips.us-east-1.amazonaws.com", "us-east-2": "redshift-fips.us-east-2.amazonaws.com", - "us-iso-east-1": "redshift-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "redshift-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "redshift-fips.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "redshift-fips.us-west-1.amazonaws.com", "us-west-2": "redshift-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/SSO/SSO_api.swift b/Sources/Soto/Services/SSO/SSO_api.swift index 12e947d662..c3380491ed 100644 --- a/Sources/Soto/Services/SSO/SSO_api.swift +++ b/Sources/Soto/Services/SSO/SSO_api.swift @@ -84,6 +84,7 @@ public struct SSO: AWSService { "ap-southeast-3": "portal.sso.ap-southeast-3.amazonaws.com", "ap-southeast-4": "portal.sso.ap-southeast-4.amazonaws.com", "ca-central-1": "portal.sso.ca-central-1.amazonaws.com", + "ca-west-1": "portal.sso.ca-west-1.amazonaws.com", "cn-north-1": "portal.sso.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "portal.sso.cn-northwest-1.amazonaws.com.cn", "eu-central-1": "portal.sso.eu-central-1.amazonaws.com", diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index 18af345111..dbd1ec69f1 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -737,6 +737,19 @@ public struct SageMaker: AWSService { ) } + /// Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify. For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker. + @Sendable + public func createOptimizationJob(_ input: CreateOptimizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateOptimizationJobResponse { + return try await self.client.execute( + operation: "CreateOptimizationJob", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a pipeline using a JSON pipeline definition. @Sendable public func createPipeline(_ input: CreatePipelineRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePipelineResponse { @@ -750,7 +763,7 @@ public struct SageMaker: AWSService { ) } - /// Creates a URL for a specified UserProfile in a Domain. 
When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. + /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. @Sendable public func createPresignedDomainUrl(_ input: CreatePresignedDomainUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedDomainUrlResponse { return try await self.client.execute( @@ -1466,6 +1479,19 @@ public struct SageMaker: AWSService { ) } + /// Deletes an optimization job. + @Sendable + public func deleteOptimizationJob(_ input: DeleteOptimizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteOptimizationJob", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a pipeline if there are no running instances of the pipeline. To delete a pipeline, you must stop all running instances of the pipeline using the StopPipelineExecution API. When you delete a pipeline, all instances of the pipeline are deleted. @Sendable public func deletePipeline(_ input: DeletePipelineRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePipelineResponse { @@ -2220,6 +2246,19 @@ public struct SageMaker: AWSService { ) } + /// Provides the properties of the specified optimization job. 
+ @Sendable + public func describeOptimizationJob(_ input: DescribeOptimizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeOptimizationJobResponse { + return try await self.client.execute( + operation: "DescribeOptimizationJob", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes the details of a pipeline. @Sendable public func describePipeline(_ input: DescribePipelineRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePipelineResponse { @@ -3273,6 +3312,19 @@ public struct SageMaker: AWSService { ) } + /// Lists the optimization jobs in your account and their properties. + @Sendable + public func listOptimizationJobs(_ input: ListOptimizationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListOptimizationJobsResponse { + return try await self.client.execute( + operation: "ListOptimizationJobs", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets a list of PipeLineExecutionStep objects. @Sendable public func listPipelineExecutionSteps(_ input: ListPipelineExecutionStepsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPipelineExecutionStepsResponse { @@ -3858,6 +3910,19 @@ public struct SageMaker: AWSService { ) } + /// Ends a running inference optimization job. + @Sendable + public func stopOptimizationJob(_ input: StopOptimizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "StopOptimizationJob", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Stops a pipeline execution. Callback Step A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a "Status" field which is set to "Stopping". You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure. Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution. Lambda Step A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped. If the timeout is hit the pipeline execution status is Failed. @Sendable public func stopPipelineExecution(_ input: StopPipelineExecutionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopPipelineExecutionResponse { @@ -5387,6 +5452,25 @@ extension SageMaker { ) } + /// Lists the optimization jobs in your account and their properties. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listOptimizationJobsPaginator( + _ input: ListOptimizationJobsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListOptimizationJobsRequest, ListOptimizationJobsResponse> { + return .init( + input: input, + command: self.listOptimizationJobs, + inputKey: \ListOptimizationJobsRequest.nextToken, + outputKey: \ListOptimizationJobsResponse.nextToken, + logger: logger + ) + } + /// Gets a list of PipeLineExecutionStep objects. /// Return PaginatorSequence for operation. /// @@ -6589,6 +6673,24 @@ extension SageMaker.ListNotebookInstancesInput: AWSPaginateToken { } } +extension SageMaker.ListOptimizationJobsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> SageMaker.ListOptimizationJobsRequest { + return .init( + creationTimeAfter: self.creationTimeAfter, + creationTimeBefore: self.creationTimeBefore, + lastModifiedTimeAfter: self.lastModifiedTimeAfter, + lastModifiedTimeBefore: self.lastModifiedTimeBefore, + maxResults: self.maxResults, + nameContains: self.nameContains, + nextToken: token, + optimizationContains: self.optimizationContains, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + statusEquals: self.statusEquals + ) + } +} + extension SageMaker.ListPipelineExecutionStepsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> SageMaker.ListPipelineExecutionStepsRequest { return .init( diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index 5b678d4a95..d7a27d3d82 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -1443,6 +1443,13 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum ListOptimizationJobsSortBy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case creationTime = "CreationTime" + case name = "Name" + case status = "Status" + public var description: String { return self.rawValue } + } + public enum ListWorkforcesSortByOptions: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case createDate = "CreateDate" case name = "Name" @@ -1760,6 +1767,46 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum OptimizationJobDeploymentInstanceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case mlG512Xlarge = "ml.g5.12xlarge" + case mlG516Xlarge = "ml.g5.16xlarge" + case mlG524Xlarge = "ml.g5.24xlarge" + case mlG52Xlarge = "ml.g5.2xlarge" + case mlG548Xlarge = "ml.g5.48xlarge" + case mlG54Xlarge = "ml.g5.4xlarge" + case mlG58Xlarge = "ml.g5.8xlarge" + case mlG5Xlarge = "ml.g5.xlarge" + case mlG612Xlarge = "ml.g6.12xlarge" + case mlG616Xlarge = "ml.g6.16xlarge" + case mlG624Xlarge = "ml.g6.24xlarge" + case mlG62Xlarge = "ml.g6.2xlarge" + case mlG648Xlarge = "ml.g6.48xlarge" + case mlG64Xlarge = "ml.g6.4xlarge" + case mlG68Xlarge = "ml.g6.8xlarge" + case mlG6Xlarge = "ml.g6.xlarge" + case mlInf224Xlarge = "ml.inf2.24xlarge" + case mlInf248Xlarge = "ml.inf2.48xlarge" + case mlInf28Xlarge = "ml.inf2.8xlarge" + case mlInf2Xlarge = "ml.inf2.xlarge" + case mlP4D24Xlarge = "ml.p4d.24xlarge" + case mlP4De24Xlarge = "ml.p4de.24xlarge" + case mlP548Xlarge = "ml.p5.48xlarge" + case mlTrn12Xlarge = "ml.trn1.2xlarge" + case mlTrn132Xlarge = "ml.trn1.32xlarge" + case mlTrn1N32Xlarge = "ml.trn1n.32xlarge" + public var
description: String { return self.rawValue } + } + + public enum OptimizationJobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case inprogress = "INPROGRESS" + case starting = "STARTING" + case stopped = "STOPPED" + case stopping = "STOPPING" + public var description: String { return self.rawValue } + } + public enum OrderKey: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ascending = "Ascending" case descending = "Descending" @@ -1818,6 +1865,14 @@ extension SageMaker { case mlG4Dn4Xlarge = "ml.g4dn.4xlarge" case mlG4Dn8Xlarge = "ml.g4dn.8xlarge" case mlG4DnXlarge = "ml.g4dn.xlarge" + case mlG512Xlarge = "ml.g5.12xlarge" + case mlG516Xlarge = "ml.g5.16xlarge" + case mlG524Xlarge = "ml.g5.24xlarge" + case mlG52Xlarge = "ml.g5.2xlarge" + case mlG548Xlarge = "ml.g5.48xlarge" + case mlG54Xlarge = "ml.g5.4xlarge" + case mlG58Xlarge = "ml.g5.8xlarge" + case mlG5Xlarge = "ml.g5.xlarge" case mlM410Xlarge = "ml.m4.10xlarge" case mlM416Xlarge = "ml.m4.16xlarge" case mlM42Xlarge = "ml.m4.2xlarge" @@ -1841,6 +1896,14 @@ extension SageMaker { case mlR52Xlarge = "ml.r5.2xlarge" case mlR54Xlarge = "ml.r5.4xlarge" case mlR58Xlarge = "ml.r5.8xlarge" + case mlR5D12Xlarge = "ml.r5d.12xlarge" + case mlR5D16Xlarge = "ml.r5d.16xlarge" + case mlR5D24Xlarge = "ml.r5d.24xlarge" + case mlR5D2Xlarge = "ml.r5d.2xlarge" + case mlR5D4Xlarge = "ml.r5d.4xlarge" + case mlR5D8Xlarge = "ml.r5d.8xlarge" + case mlR5DLarge = "ml.r5d.large" + case mlR5DXlarge = "ml.r5d.xlarge" case mlR5Large = "ml.r5.large" case mlR5Xlarge = "ml.r5.xlarge" case mlT32Xlarge = "ml.t3.2xlarge" @@ -2703,6 +2766,26 @@ extension SageMaker { case mlP4D24Xlarge = "ml.p4d.24xlarge" case mlP4De24Xlarge = "ml.p4de.24xlarge" case mlP548Xlarge = "ml.p5.48xlarge" + case mlR512Xlarge = "ml.r5.12xlarge" + case mlR516Xlarge = "ml.r5.16xlarge" + case mlR524Xlarge = "ml.r5.24xlarge" + case mlR52Xlarge = "ml.r5.2xlarge" + case mlR54Xlarge = "ml.r5.4xlarge" + case mlR58Xlarge = "ml.r5.8xlarge" + case mlR5D12Xlarge = "ml.r5d.12xlarge" + case mlR5D16Xlarge = "ml.r5d.16xlarge" + case mlR5D24Xlarge = "ml.r5d.24xlarge" + case mlR5D2Xlarge = "ml.r5d.2xlarge" + case mlR5D4Xlarge = "ml.r5d.4xlarge" + case mlR5D8Xlarge = "ml.r5d.8xlarge" + case mlR5DLarge = "ml.r5d.large" + case mlR5DXlarge = "ml.r5d.xlarge" + case mlR5Large = "ml.r5.large" + case mlR5Xlarge = "ml.r5.xlarge" + case mlT32Xlarge = "ml.t3.2xlarge" + case mlT3Large = "ml.t3.large" + case mlT3Medium = "ml.t3.medium" + case mlT3Xlarge = "ml.t3.xlarge" case mlTrn12Xlarge = "ml.trn1.2xlarge" case mlTrn132Xlarge = "ml.trn1.32xlarge" case mlTrn1N32Xlarge = "ml.trn1n.32xlarge" @@ -3063,6 +3146,56 @@ extension SageMaker { } } + public enum OptimizationConfig: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Settings for the model compilation technique that's applied by a model optimization job. + case modelCompilationConfig(ModelCompilationConfig) + /// Settings for the model quantization technique that's applied by a model optimization job. 
+ case modelQuantizationConfig(ModelQuantizationConfig) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .modelCompilationConfig: + let value = try container.decode(ModelCompilationConfig.self, forKey: .modelCompilationConfig) + self = .modelCompilationConfig(value) + case .modelQuantizationConfig: + let value = try container.decode(ModelQuantizationConfig.self, forKey: .modelQuantizationConfig) + self = .modelQuantizationConfig(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .modelCompilationConfig(let value): + try container.encode(value, forKey: .modelCompilationConfig) + case .modelQuantizationConfig(let value): + try container.encode(value, forKey: .modelQuantizationConfig) + } + } + + public func validate(name: String) throws { + switch self { + case .modelCompilationConfig(let value): + try value.validate(name: "\(name).modelCompilationConfig") + case .modelQuantizationConfig(let value): + try value.validate(name: "\(name).modelQuantizationConfig") + } + } + + private enum CodingKeys: String, CodingKey { + case modelCompilationConfig = "ModelCompilationConfig" + case modelQuantizationConfig = "ModelQuantizationConfig" + } + } + public enum TrialComponentParameterValue: AWSEncodableShape & AWSDecodableShape, Sendable { /// The numeric value of a numeric hyperparameter. If you specify a value for this parameter, you can't specify the StringValue parameter. case numberValue(Double) @@ -3325,6 +3458,29 @@ extension SageMaker { } } + public struct AdditionalModelDataSource: AWSEncodableShape & AWSDecodableShape { + /// A custom name for this AdditionalModelDataSource object. + public let channelName: String? + public let s3DataSource: S3ModelDataSource? + + public init(channelName: String? = nil, s3DataSource: S3ModelDataSource? = nil) { + self.channelName = channelName + self.s3DataSource = s3DataSource + } + + public func validate(name: String) throws { + try self.validate(self.channelName, name: "channelName", parent: name, max: 64) + try self.validate(self.channelName, name: "channelName", parent: name, min: 1) + try self.validate(self.channelName, name: "channelName", parent: name, pattern: "^[A-Za-z0-9\\.\\-_]+$") + try self.s3DataSource?.validate(name: "\(name).s3DataSource") + } + + private enum CodingKeys: String, CodingKey { + case channelName = "ChannelName" + case s3DataSource = "S3DataSource" + } + } + public struct AdditionalS3DataSource: AWSEncodableShape & AWSDecodableShape { /// The type of compression used for an additional data source used in inference or training. Specify None if your additional data source is not compressed. public let compressionType: CompressionType? @@ -3576,6 +3732,27 @@ extension SageMaker { } } + public struct AmazonQSettings: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the Amazon Q profile used within the domain. + public let qProfileArn: String? + /// Whether Amazon Q has been enabled within the domain. + public let status: FeatureStatus? + + public init(qProfileArn: String? = nil, status: FeatureStatus? 
= nil) { + self.qProfileArn = qProfileArn + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.qProfileArn, name: "qProfileArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$") + } + + private enum CodingKeys: String, CodingKey { + case qProfileArn = "QProfileArn" + case status = "Status" + } + } + public struct AnnotationConsolidationConfig: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data. This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda. Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes. arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as "votes" for the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label. arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video. arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians. arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking 3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians. arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection 3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. 
For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames. arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking 3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create a point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify. arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation Use the following ARNs for Label Verification and Adjustment Jobs Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels . Semantic Segmentation Adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as "votes" for the correct label. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation Semantic Segmentation Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation Bounding Box Adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations. arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox Bounding Box Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames. arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames. arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking 3D Point Cloud Object Detection Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection 3D Point Cloud Object Tracking Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames. arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking 3D Point Cloud Semantic Segmentation Adjustment - Use this task type when you want workers to adjust a point-level semantic segmentation masks using a paint tool. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation public let annotationConsolidationLambdaArn: String? @@ -6082,6 +6259,8 @@ extension SageMaker { } public struct ContainerDefinition: AWSEncodableShape & AWSDecodableShape { + /// Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action. + public let additionalModelDataSources: [AdditionalModelDataSource]? /// This parameter is ignored for models that contain only a PrimaryContainer. When a ContainerDefinition is part of an inference pipeline, the value of the parameter uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned based on the position of the ContainerDefinition in the pipeline. If you specify a value for the ContainerHostName for any ContainerDefinition that is part of an inference pipeline, you must specify a value for the ContainerHostName parameter of every ContainerDefinition in that pipeline. public let containerHostname: String? /// The environment variables to set in the Docker container. The maximum length of each key and value in the Environment map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a CreateModel request, then the maximum length of all of their maps, combined, is also 32 KB. @@ -6103,7 +6282,8 @@ extension SageMaker { /// Specifies additional configuration for multi-model endpoints. public let multiModelConfig: MultiModelConfig? - public init(containerHostname: String? = nil, environment: [String: String]? = nil, image: String? = nil, imageConfig: ImageConfig? = nil, inferenceSpecificationName: String? = nil, mode: ContainerMode? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil, modelPackageName: String? = nil, multiModelConfig: MultiModelConfig? = nil) { + public init(additionalModelDataSources: [AdditionalModelDataSource]? = nil, containerHostname: String? = nil, environment: [String: String]? = nil, image: String? = nil, imageConfig: ImageConfig? = nil, inferenceSpecificationName: String? 
= nil, mode: ContainerMode? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil, modelPackageName: String? = nil, multiModelConfig: MultiModelConfig? = nil) { + self.additionalModelDataSources = additionalModelDataSources self.containerHostname = containerHostname self.environment = environment self.image = image @@ -6117,6 +6297,10 @@ extension SageMaker { } public func validate(name: String) throws { + try self.additionalModelDataSources?.forEach { + try $0.validate(name: "\(name).additionalModelDataSources[]") + } + try self.validate(self.additionalModelDataSources, name: "additionalModelDataSources", parent: name, max: 5) try self.validate(self.containerHostname, name: "containerHostname", parent: name, max: 63) try self.validate(self.containerHostname, name: "containerHostname", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.environment?.forEach { @@ -6141,6 +6325,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case additionalModelDataSources = "AdditionalModelDataSources" case containerHostname = "ContainerHostname" case environment = "Environment" case image = "Image" @@ -9366,6 +9551,94 @@ extension SageMaker { } } + public struct CreateOptimizationJobRequest: AWSEncodableShape { + /// The type of instance that hosts the optimized model that you create with the optimization job. + public let deploymentInstanceType: OptimizationJobDeploymentInstanceType? + /// The location of the source model to optimize with an optimization job. + public let modelSource: OptimizationJobModelSource? + /// Settings for each of the optimization techniques that the job applies. + public let optimizationConfigs: [OptimizationConfig]? + /// The environment variables to set in the model container. + public let optimizationEnvironment: [String: String]? + /// A custom name for the new optimization job. + public let optimizationJobName: String? + /// Details for where to store the optimized model that you create with the optimization job. + public let outputConfig: OptimizationJobOutputConfig? + /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. + public let roleArn: String? + public let stoppingCondition: StoppingCondition? + /// A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. + public let tags: [Tag]? + /// A VPC in Amazon VPC that your optimized model has access to. + public let vpcConfig: OptimizationVpcConfig? + + public init(deploymentInstanceType: OptimizationJobDeploymentInstanceType? = nil, modelSource: OptimizationJobModelSource? = nil, optimizationConfigs: [OptimizationConfig]? = nil, optimizationEnvironment: [String: String]? = nil, optimizationJobName: String? = nil, outputConfig: OptimizationJobOutputConfig? = nil, roleArn: String? = nil, stoppingCondition: StoppingCondition? = nil, tags: [Tag]? = nil, vpcConfig: OptimizationVpcConfig? 
= nil) { + self.deploymentInstanceType = deploymentInstanceType + self.modelSource = modelSource + self.optimizationConfigs = optimizationConfigs + self.optimizationEnvironment = optimizationEnvironment + self.optimizationJobName = optimizationJobName + self.outputConfig = outputConfig + self.roleArn = roleArn + self.stoppingCondition = stoppingCondition + self.tags = tags + self.vpcConfig = vpcConfig + } + + public func validate(name: String) throws { + try self.modelSource?.validate(name: "\(name).modelSource") + try self.optimizationConfigs?.forEach { + try $0.validate(name: "\(name).optimizationConfigs[]") + } + try self.validate(self.optimizationConfigs, name: "optimizationConfigs", parent: name, max: 10) + try self.optimizationEnvironment?.forEach { + try validate($0.key, name: "optimizationEnvironment.key", parent: name, max: 256) + try validate($0.key, name: "optimizationEnvironment.key", parent: name, pattern: "^(?!\\s*$).+$") + try validate($0.value, name: "optimizationEnvironment[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.optimizationEnvironment, name: "optimizationEnvironment", parent: name, max: 25) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, max: 63) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, min: 1) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.outputConfig?.validate(name: "\(name).outputConfig") + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.stoppingCondition?.validate(name: "\(name).stoppingCondition") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + try self.vpcConfig?.validate(name: "\(name).vpcConfig") + } + + private enum CodingKeys: String, CodingKey { + case deploymentInstanceType = "DeploymentInstanceType" + case modelSource = "ModelSource" + case optimizationConfigs = "OptimizationConfigs" + case optimizationEnvironment = "OptimizationEnvironment" + case optimizationJobName = "OptimizationJobName" + case outputConfig = "OutputConfig" + case roleArn = "RoleArn" + case stoppingCondition = "StoppingCondition" + case tags = "Tags" + case vpcConfig = "VpcConfig" + } + } + + public struct CreateOptimizationJobResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the optimization job. + public let optimizationJobArn: String? + + public init(optimizationJobArn: String? = nil) { + self.optimizationJobArn = optimizationJobArn + } + + private enum CodingKeys: String, CodingKey { + case optimizationJobArn = "OptimizationJobArn" + } + } + public struct CreatePipelineRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. public let clientRequestToken: String? @@ -10284,9 +10557,9 @@ extension SageMaker { public struct CreateUserProfileRequest: AWSEncodableShape { /// The ID of the associated Domain. public let domainId: String? - /// A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is "UserName". 
If the Domain's AuthMode is IAM Identity Center, this field is required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. + /// A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is "UserName". If the Domain's AuthMode is IAM Identity Center, this field is required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. public let singleSignOnUserIdentifier: String? - /// The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. + /// The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. public let singleSignOnUserValue: String? /// Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags that you specify for the User Profile are also added to all Apps that the User Profile launches. public let tags: [Tag]? @@ -12041,6 +12314,25 @@ extension SageMaker { } } + public struct DeleteOptimizationJobRequest: AWSEncodableShape { + /// The name that you assigned to the optimization job. + public let optimizationJobName: String? + + public init(optimizationJobName: String? = nil) { + self.optimizationJobName = optimizationJobName + } + + public func validate(name: String) throws { + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, max: 63) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, min: 1) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + } + + private enum CodingKeys: String, CodingKey { + case optimizationJobName = "OptimizationJobName" + } + } + public struct DeletePipelineRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. public let clientRequestToken: String? @@ -16197,6 +16489,101 @@ extension SageMaker { } } + public struct DescribeOptimizationJobRequest: AWSEncodableShape { + /// The name that you assigned to the optimization job. + public let optimizationJobName: String? + + public init(optimizationJobName: String? = nil) { + self.optimizationJobName = optimizationJobName + } + + public func validate(name: String) throws { + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, max: 63) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, min: 1) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + } + + private enum CodingKeys: String, CodingKey { + case optimizationJobName = "OptimizationJobName" + } + } + + public struct DescribeOptimizationJobResponse: AWSDecodableShape { + /// The time when you created the optimization job. + public let creationTime: Date? + /// The type of instance that hosts the optimized model that you create with the optimization job. 
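// A minimal usage sketch for the optimization-job shapes added in this change. It assumes the
// matching client operations (createOptimizationJob, describeOptimizationJob, listOptimizationJobs,
// stopOptimizationJob) are generated in SageMaker_api.swift as part of the same update; the bucket,
// role ARN, environment option, and job name below are placeholders. A real request also needs
// deploymentInstanceType, optimizationConfigs, and stoppingCondition, whose members are defined
// elsewhere in this file and are omitted here.
import SotoSageMaker

func sketchOptimizationJobLifecycle(sageMaker: SageMaker) async throws {
    // Create a job that reads a source model from S3 (accepting the model EULA)
    // and writes the optimized artifacts back to S3.
    let create = SageMaker.CreateOptimizationJobRequest(
        modelSource: .init(s3: .init(
            modelAccessConfig: .init(acceptEula: true),
            s3Uri: "s3://example-bucket/source-model/"
        )),
        optimizationEnvironment: ["EXAMPLE_OPTION": "1"],
        optimizationJobName: "example-optimization-job",
        outputConfig: .init(s3OutputLocation: "s3://example-bucket/optimized-model/"),
        roleArn: "arn:aws:iam::111122223333:role/ExampleSageMakerRole"
    )
    let created = try await sageMaker.createOptimizationJob(create)
    print("Started job:", created.optimizationJobArn ?? "-")

    // Check the job status and the image recommended for hosting the optimized model.
    let described = try await sageMaker.describeOptimizationJob(
        .init(optimizationJobName: "example-optimization-job")
    )
    print("Status:", described.optimizationJobStatus.map { "\($0)" } ?? "unknown")
    print("Recommended image:", described.optimizationOutput?.recommendedInferenceImage ?? "-")

    // List recent jobs, or stop one that is no longer needed.
    _ = try await sageMaker.listOptimizationJobs(.init(maxResults: 10))
    _ = try await sageMaker.stopOptimizationJob(.init(optimizationJobName: "example-optimization-job"))
}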
+ public let deploymentInstanceType: OptimizationJobDeploymentInstanceType? + /// If the optimization job status is FAILED, the reason for the failure. + public let failureReason: String? + /// The time when the optimization job was last updated. + public let lastModifiedTime: Date? + /// The location of the source model to optimize with an optimization job. + public let modelSource: OptimizationJobModelSource? + /// Settings for each of the optimization techniques that the job applies. + public let optimizationConfigs: [OptimizationConfig]? + /// The time when the optimization job finished processing. + public let optimizationEndTime: Date? + /// The environment variables to set in the model container. + public let optimizationEnvironment: [String: String]? + /// The Amazon Resource Name (ARN) of the optimization job. + public let optimizationJobArn: String? + /// The name that you assigned to the optimization job. + public let optimizationJobName: String? + /// The current status of the optimization job. + public let optimizationJobStatus: OptimizationJobStatus? + /// Output values produced by an optimization job. + public let optimizationOutput: OptimizationOutput? + /// The time when the optimization job started. + public let optimizationStartTime: Date? + /// Details for where to store the optimized model that you create with the optimization job. + public let outputConfig: OptimizationJobOutputConfig? + /// The ARN of the IAM role that you assigned to the optimization job. + public let roleArn: String? + public let stoppingCondition: StoppingCondition? + /// A VPC in Amazon VPC that your optimized model has access to. + public let vpcConfig: OptimizationVpcConfig? + + public init(creationTime: Date? = nil, deploymentInstanceType: OptimizationJobDeploymentInstanceType? = nil, failureReason: String? = nil, lastModifiedTime: Date? = nil, modelSource: OptimizationJobModelSource? = nil, optimizationConfigs: [OptimizationConfig]? = nil, optimizationEndTime: Date? = nil, optimizationEnvironment: [String: String]? = nil, optimizationJobArn: String? = nil, optimizationJobName: String? = nil, optimizationJobStatus: OptimizationJobStatus? = nil, optimizationOutput: OptimizationOutput? = nil, optimizationStartTime: Date? = nil, outputConfig: OptimizationJobOutputConfig? = nil, roleArn: String? = nil, stoppingCondition: StoppingCondition? = nil, vpcConfig: OptimizationVpcConfig? 
= nil) { + self.creationTime = creationTime + self.deploymentInstanceType = deploymentInstanceType + self.failureReason = failureReason + self.lastModifiedTime = lastModifiedTime + self.modelSource = modelSource + self.optimizationConfigs = optimizationConfigs + self.optimizationEndTime = optimizationEndTime + self.optimizationEnvironment = optimizationEnvironment + self.optimizationJobArn = optimizationJobArn + self.optimizationJobName = optimizationJobName + self.optimizationJobStatus = optimizationJobStatus + self.optimizationOutput = optimizationOutput + self.optimizationStartTime = optimizationStartTime + self.outputConfig = outputConfig + self.roleArn = roleArn + self.stoppingCondition = stoppingCondition + self.vpcConfig = vpcConfig + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "CreationTime" + case deploymentInstanceType = "DeploymentInstanceType" + case failureReason = "FailureReason" + case lastModifiedTime = "LastModifiedTime" + case modelSource = "ModelSource" + case optimizationConfigs = "OptimizationConfigs" + case optimizationEndTime = "OptimizationEndTime" + case optimizationEnvironment = "OptimizationEnvironment" + case optimizationJobArn = "OptimizationJobArn" + case optimizationJobName = "OptimizationJobName" + case optimizationJobStatus = "OptimizationJobStatus" + case optimizationOutput = "OptimizationOutput" + case optimizationStartTime = "OptimizationStartTime" + case outputConfig = "OutputConfig" + case roleArn = "RoleArn" + case stoppingCondition = "StoppingCondition" + case vpcConfig = "VpcConfig" + } + } + public struct DescribePipelineDefinitionForExecutionRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the pipeline execution. public let pipelineExecutionArn: String? @@ -17717,6 +18104,8 @@ extension SageMaker { } public struct DomainSettings: AWSEncodableShape & AWSDecodableShape { + /// A collection of settings that configure the Amazon Q experience within the domain. The AuthMode that you use to create the domain must be SSO. + public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. @@ -17726,7 +18115,8 @@ extension SageMaker { /// The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication between Domain-level apps and user apps. public let securityGroupIds: [String]? - public init(dockerSettings: DockerSettings? = nil, executionRoleIdentityConfig: ExecutionRoleIdentityConfig? = nil, rStudioServerProDomainSettings: RStudioServerProDomainSettings? = nil, securityGroupIds: [String]? = nil) { + public init(amazonQSettings: AmazonQSettings? = nil, dockerSettings: DockerSettings? = nil, executionRoleIdentityConfig: ExecutionRoleIdentityConfig? = nil, rStudioServerProDomainSettings: RStudioServerProDomainSettings? = nil, securityGroupIds: [String]? 
= nil) { + self.amazonQSettings = amazonQSettings self.dockerSettings = dockerSettings self.executionRoleIdentityConfig = executionRoleIdentityConfig self.rStudioServerProDomainSettings = rStudioServerProDomainSettings @@ -17734,6 +18124,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.amazonQSettings?.validate(name: "\(name).amazonQSettings") try self.dockerSettings?.validate(name: "\(name).dockerSettings") try self.rStudioServerProDomainSettings?.validate(name: "\(name).rStudioServerProDomainSettings") try self.securityGroupIds?.forEach { @@ -17744,6 +18135,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case amazonQSettings = "AmazonQSettings" case dockerSettings = "DockerSettings" case executionRoleIdentityConfig = "ExecutionRoleIdentityConfig" case rStudioServerProDomainSettings = "RStudioServerProDomainSettings" @@ -17752,6 +18144,8 @@ extension SageMaker { } public struct DomainSettingsForUpdate: AWSEncodableShape { + /// A collection of settings that configure the Amazon Q experience within the domain. + public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state. @@ -17761,7 +18155,8 @@ extension SageMaker { /// The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication between Domain-level apps and user apps. public let securityGroupIds: [String]? - public init(dockerSettings: DockerSettings? = nil, executionRoleIdentityConfig: ExecutionRoleIdentityConfig? = nil, rStudioServerProDomainSettingsForUpdate: RStudioServerProDomainSettingsForUpdate? = nil, securityGroupIds: [String]? = nil) { + public init(amazonQSettings: AmazonQSettings? = nil, dockerSettings: DockerSettings? = nil, executionRoleIdentityConfig: ExecutionRoleIdentityConfig? = nil, rStudioServerProDomainSettingsForUpdate: RStudioServerProDomainSettingsForUpdate? = nil, securityGroupIds: [String]? = nil) { + self.amazonQSettings = amazonQSettings self.dockerSettings = dockerSettings self.executionRoleIdentityConfig = executionRoleIdentityConfig self.rStudioServerProDomainSettingsForUpdate = rStudioServerProDomainSettingsForUpdate @@ -17769,6 +18164,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.amazonQSettings?.validate(name: "\(name).amazonQSettings") try self.dockerSettings?.validate(name: "\(name).dockerSettings") try self.rStudioServerProDomainSettingsForUpdate?.validate(name: "\(name).rStudioServerProDomainSettingsForUpdate") try self.securityGroupIds?.forEach { @@ -17779,6 +18175,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case amazonQSettings = "AmazonQSettings" case dockerSettings = "DockerSettings" case executionRoleIdentityConfig = "ExecutionRoleIdentityConfig" case rStudioServerProDomainSettingsForUpdate = "RStudioServerProDomainSettingsForUpdate" @@ -26123,6 +26520,87 @@ extension SageMaker { } } + public struct ListOptimizationJobsRequest: AWSEncodableShape { + /// Filters the results to only those optimization jobs that were created after the specified time. + public let creationTimeAfter: Date? + /// Filters the results to only those optimization jobs that were created before the specified time. 
+ public let creationTimeBefore: Date? + /// Filters the results to only those optimization jobs that were updated after the specified time. + public let lastModifiedTimeAfter: Date? + /// Filters the results to only those optimization jobs that were updated before the specified time. + public let lastModifiedTimeBefore: Date? + /// The maximum number of optimization jobs to return in the response. The default is 50. + public let maxResults: Int? + /// Filters the results to only those optimization jobs with a name that contains the specified string. + public let nameContains: String? + /// A token that you use to get the next set of results following a truncated response. If the response to the previous request was truncated, that response provides the value for this token. + public let nextToken: String? + /// Filters the results to only those optimization jobs that apply the specified optimization techniques. You can specify either Quantization or Compilation. + public let optimizationContains: String? + /// The field by which to sort the optimization jobs in the response. The default is CreationTime + public let sortBy: ListOptimizationJobsSortBy? + /// The sort order for results. The default is Ascending + public let sortOrder: SortOrder? + /// Filters the results to only those optimization jobs with the specified status. + public let statusEquals: OptimizationJobStatus? + + public init(creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, lastModifiedTimeAfter: Date? = nil, lastModifiedTimeBefore: Date? = nil, maxResults: Int? = nil, nameContains: String? = nil, nextToken: String? = nil, optimizationContains: String? = nil, sortBy: ListOptimizationJobsSortBy? = nil, sortOrder: SortOrder? = nil, statusEquals: OptimizationJobStatus? = nil) { + self.creationTimeAfter = creationTimeAfter + self.creationTimeBefore = creationTimeBefore + self.lastModifiedTimeAfter = lastModifiedTimeAfter + self.lastModifiedTimeBefore = lastModifiedTimeBefore + self.maxResults = maxResults + self.nameContains = nameContains + self.nextToken = nextToken + self.optimizationContains = optimizationContains + self.sortBy = sortBy + self.sortOrder = sortOrder + self.statusEquals = statusEquals + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nameContains, name: "nameContains", parent: name, max: 63) + try self.validate(self.nameContains, name: "nameContains", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: ".*") + try self.validate(self.optimizationContains, name: "optimizationContains", parent: name, max: 63) + try self.validate(self.optimizationContains, name: "optimizationContains", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + } + + private enum CodingKeys: String, CodingKey { + case creationTimeAfter = "CreationTimeAfter" + case creationTimeBefore = "CreationTimeBefore" + case lastModifiedTimeAfter = "LastModifiedTimeAfter" + case lastModifiedTimeBefore = "LastModifiedTimeBefore" + case maxResults = "MaxResults" + case nameContains = "NameContains" + case nextToken = "NextToken" + case optimizationContains = "OptimizationContains" + case sortBy = "SortBy" + case sortOrder = "SortOrder" + case statusEquals = "StatusEquals" + } + } + + public struct 
ListOptimizationJobsResponse: AWSDecodableShape { + /// The token to use in a subsequent request to get the next set of results following a truncated response. + public let nextToken: String? + /// A list of optimization jobs and their properties that matches any of the filters you specified in the request. + public let optimizationJobSummaries: [OptimizationJobSummary]? + + public init(nextToken: String? = nil, optimizationJobSummaries: [OptimizationJobSummary]? = nil) { + self.nextToken = nextToken + self.optimizationJobSummaries = optimizationJobSummaries + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case optimizationJobSummaries = "OptimizationJobSummaries" + } + } + public struct ListPipelineExecutionStepsRequest: AWSEncodableShape { /// The maximum number of pipeline execution steps to return in the response. public let maxResults: Int? @@ -27926,6 +28404,34 @@ extension SageMaker { } } + public struct ModelCompilationConfig: AWSEncodableShape & AWSDecodableShape { + /// The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization. + public let image: String? + /// Environment variables that override the default ones in the model container. + public let overrideEnvironment: [String: String]? + + public init(image: String? = nil, overrideEnvironment: [String: String]? = nil) { + self.image = image + self.overrideEnvironment = overrideEnvironment + } + + public func validate(name: String) throws { + try self.validate(self.image, name: "image", parent: name, max: 255) + try self.validate(self.image, name: "image", parent: name, pattern: "^[\\S]+$") + try self.overrideEnvironment?.forEach { + try validate($0.key, name: "overrideEnvironment.key", parent: name, max: 256) + try validate($0.key, name: "overrideEnvironment.key", parent: name, pattern: "^(?!\\s*$).+$") + try validate($0.value, name: "overrideEnvironment[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.overrideEnvironment, name: "overrideEnvironment", parent: name, max: 25) + } + + private enum CodingKeys: String, CodingKey { + case image = "Image" + case overrideEnvironment = "OverrideEnvironment" + } + } + public struct ModelConfiguration: AWSDecodableShape { /// The name of the compilation job used to create the recommended model artifacts. public let compilationJobName: String? @@ -29037,6 +29543,34 @@ extension SageMaker { } } + public struct ModelQuantizationConfig: AWSEncodableShape & AWSDecodableShape { + /// The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization. + public let image: String? + /// Environment variables that override the default ones in the model container. + public let overrideEnvironment: [String: String]? + + public init(image: String? = nil, overrideEnvironment: [String: String]? 
= nil) { + self.image = image + self.overrideEnvironment = overrideEnvironment + } + + public func validate(name: String) throws { + try self.validate(self.image, name: "image", parent: name, max: 255) + try self.validate(self.image, name: "image", parent: name, pattern: "^[\\S]+$") + try self.overrideEnvironment?.forEach { + try validate($0.key, name: "overrideEnvironment.key", parent: name, max: 256) + try validate($0.key, name: "overrideEnvironment.key", parent: name, pattern: "^(?!\\s*$).+$") + try validate($0.value, name: "overrideEnvironment[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.overrideEnvironment, name: "overrideEnvironment", parent: name, max: 25) + } + + private enum CodingKeys: String, CodingKey { + case image = "Image" + case overrideEnvironment = "OverrideEnvironment" + } + } + public struct ModelRegisterSettings: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas Amazon Web Services account than the Amazon Web Services account in which SageMaker model registry is set up. public let crossAccountModelRegisterRoleArn: String? @@ -30349,6 +30883,172 @@ extension SageMaker { } } + public struct OptimizationJobModelSource: AWSEncodableShape & AWSDecodableShape { + /// The Amazon S3 location of a source model to optimize with an optimization job. + public let s3: OptimizationJobModelSourceS3? + + public init(s3: OptimizationJobModelSourceS3? = nil) { + self.s3 = s3 + } + + public func validate(name: String) throws { + try self.s3?.validate(name: "\(name).s3") + } + + private enum CodingKeys: String, CodingKey { + case s3 = "S3" + } + } + + public struct OptimizationJobModelSourceS3: AWSEncodableShape & AWSDecodableShape { + /// The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA). + public let modelAccessConfig: OptimizationModelAccessConfig? + /// An Amazon S3 URI that locates a source model to optimize with an optimization job. + public let s3Uri: String? + + public init(modelAccessConfig: OptimizationModelAccessConfig? = nil, s3Uri: String? = nil) { + self.modelAccessConfig = modelAccessConfig + self.s3Uri = s3Uri + } + + public func validate(name: String) throws { + try self.validate(self.s3Uri, name: "s3Uri", parent: name, max: 1024) + try self.validate(self.s3Uri, name: "s3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + } + + private enum CodingKeys: String, CodingKey { + case modelAccessConfig = "ModelAccessConfig" + case s3Uri = "S3Uri" + } + } + + public struct OptimizationJobOutputConfig: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses the key to encrypt the artifacts of the optimized model when SageMaker uploads the model to Amazon S3. + public let kmsKeyId: String? + /// The Amazon S3 URI for where to store the optimized model that you create with an optimization job. + public let s3OutputLocation: String? + + public init(kmsKeyId: String? = nil, s3OutputLocation: String? 
= nil) { + self.kmsKeyId = kmsKeyId + self.s3OutputLocation = s3OutputLocation + } + + public func validate(name: String) throws { + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^[a-zA-Z0-9:/_-]*$") + try self.validate(self.s3OutputLocation, name: "s3OutputLocation", parent: name, max: 1024) + try self.validate(self.s3OutputLocation, name: "s3OutputLocation", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "KmsKeyId" + case s3OutputLocation = "S3OutputLocation" + } + } + + public struct OptimizationJobSummary: AWSDecodableShape { + /// The time when you created the optimization job. + public let creationTime: Date? + /// The type of instance that hosts the optimized model that you create with the optimization job. + public let deploymentInstanceType: OptimizationJobDeploymentInstanceType? + /// The time when the optimization job was last updated. + public let lastModifiedTime: Date? + /// The time when the optimization job finished processing. + public let optimizationEndTime: Date? + /// The Amazon Resource Name (ARN) of the optimization job. + public let optimizationJobArn: String? + /// The name that you assigned to the optimization job. + public let optimizationJobName: String? + /// The current status of the optimization job. + public let optimizationJobStatus: OptimizationJobStatus? + /// The time when the optimization job started. + public let optimizationStartTime: Date? + /// The optimization techniques that are applied by the optimization job. + public let optimizationTypes: [String]? + + public init(creationTime: Date? = nil, deploymentInstanceType: OptimizationJobDeploymentInstanceType? = nil, lastModifiedTime: Date? = nil, optimizationEndTime: Date? = nil, optimizationJobArn: String? = nil, optimizationJobName: String? = nil, optimizationJobStatus: OptimizationJobStatus? = nil, optimizationStartTime: Date? = nil, optimizationTypes: [String]? = nil) { + self.creationTime = creationTime + self.deploymentInstanceType = deploymentInstanceType + self.lastModifiedTime = lastModifiedTime + self.optimizationEndTime = optimizationEndTime + self.optimizationJobArn = optimizationJobArn + self.optimizationJobName = optimizationJobName + self.optimizationJobStatus = optimizationJobStatus + self.optimizationStartTime = optimizationStartTime + self.optimizationTypes = optimizationTypes + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "CreationTime" + case deploymentInstanceType = "DeploymentInstanceType" + case lastModifiedTime = "LastModifiedTime" + case optimizationEndTime = "OptimizationEndTime" + case optimizationJobArn = "OptimizationJobArn" + case optimizationJobName = "OptimizationJobName" + case optimizationJobStatus = "OptimizationJobStatus" + case optimizationStartTime = "OptimizationStartTime" + case optimizationTypes = "OptimizationTypes" + } + } + + public struct OptimizationModelAccessConfig: AWSEncodableShape & AWSDecodableShape { + /// Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model. + public let acceptEula: Bool? + + public init(acceptEula: Bool? 
= nil) { + self.acceptEula = acceptEula + } + + private enum CodingKeys: String, CodingKey { + case acceptEula = "AcceptEula" + } + } + + public struct OptimizationOutput: AWSDecodableShape { + /// The image that SageMaker recommends that you use to host the optimized model that you created with an optimization job. + public let recommendedInferenceImage: String? + + public init(recommendedInferenceImage: String? = nil) { + self.recommendedInferenceImage = recommendedInferenceImage + } + + private enum CodingKeys: String, CodingKey { + case recommendedInferenceImage = "RecommendedInferenceImage" + } + } + + public struct OptimizationVpcConfig: AWSEncodableShape & AWSDecodableShape { + /// The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field. + public let securityGroupIds: [String]? + /// The ID of the subnets in the VPC to which you want to connect your optimized model. + public let subnets: [String]? + + public init(securityGroupIds: [String]? = nil, subnets: [String]? = nil) { + self.securityGroupIds = securityGroupIds + self.subnets = subnets + } + + public func validate(name: String) throws { + try self.securityGroupIds?.forEach { + try validate($0, name: "securityGroupIds[]", parent: name, max: 32) + try validate($0, name: "securityGroupIds[]", parent: name, pattern: "^[-0-9a-zA-Z]+$") + } + try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, max: 5) + try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, min: 1) + try self.subnets?.forEach { + try validate($0, name: "subnets[]", parent: name, max: 32) + try validate($0, name: "subnets[]", parent: name, pattern: "^[-0-9a-zA-Z]+$") + } + try self.validate(self.subnets, name: "subnets", parent: name, max: 16) + try self.validate(self.subnets, name: "subnets", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case securityGroupIds = "SecurityGroupIds" + case subnets = "Subnets" + } + } + public struct OutputConfig: AWSEncodableShape & AWSDecodableShape { /// Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions. DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. "float32" is used if "DTYPE" is not specified. Options for data type are: float32: Use either "float" or "float32". int64: Use either "int64" or "long". For example, {"dtype" : "float32"}. CPU: Compilation for CPU supports the following compiler options. mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} ARM: Details of ARM CPU compilations. NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support. NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. gpu_code: Specifies the targeted architecture. trt-ver: Specifies the TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version in x.y format. 
For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} ANDROID: Compilation for the Android OS supports the following compiler options: ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support. INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"". For information about supported compiler options, see Neuron Compiler CLI Reference Guide. CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options: class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by newlines. EIA: Compilation for the Elastic Inference Accelerator supports the following compiler options: precision_mode: Specifies the precision of compiled artifacts. Supported values are "FP16" and "FP32". Default is "FP32". signature_def_key: Specifies the signature to use for models in SavedModel format. Defaults is TensorFlow's default signature def key. output_names: Specifies a list of output tensor names for models in FrozenGraph format. Set at most one API field, either: signature_def_key or output_names. For example: {"precision_mode": "FP32", "output_names": ["output:0"]} public let compilerOptions: String? @@ -34807,6 +35507,25 @@ extension SageMaker { } } + public struct StopOptimizationJobRequest: AWSEncodableShape { + /// The name that you assigned to the optimization job. + public let optimizationJobName: String? + + public init(optimizationJobName: String? = nil) { + self.optimizationJobName = optimizationJobName + } + + public func validate(name: String) throws { + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, max: 63) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, min: 1) + try self.validate(self.optimizationJobName, name: "optimizationJobName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + } + + private enum CodingKeys: String, CodingKey { + case optimizationJobName = "OptimizationJobName" + } + } + public struct StopPipelineExecutionRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than once. public let clientRequestToken: String? diff --git a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift index caaf967d32..1b2f16fc93 100644 --- a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift +++ b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift @@ -160,7 +160,7 @@ public struct SecretsManager: AWSService { } /// Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret. 
For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services. - /// For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret. If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key. + /// For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret. If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key. Secrets Manager generates a CloudTrail log entry when you call this action. 
Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key. When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets. @Sendable public func createSecret(_ input: CreateSecretRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSecretResponse { return try await self.client.execute( @@ -290,7 +290,7 @@ public struct SecretsManager: AWSService { ) } - /// Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. + /// Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. 
When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets. @Sendable public func putSecretValue(_ input: PutSecretValueRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutSecretValueResponse { return try await self.client.execute( @@ -394,7 +394,7 @@ public struct SecretsManager: AWSService { ) } - /// Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue. To change the rotation configuration of a secret, use RotateSecret instead. To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services. We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from. 
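// A minimal sketch of the CreateSecret / PutSecretValue flow described in the doc comments above,
// using the SecretsManager client defined in this file. The secret name and values are placeholders;
// CreateSecretRequest and PutSecretValueRequest are defined in SecretsManager_shapes.swift.
import SotoSecretsManager

func storeAndRotateExampleSecret(secretsManager: SecretsManager) async throws {
    // CreateSecret encrypts the initial version and attaches the AWSCURRENT staging label to it.
    let created = try await secretsManager.createSecret(.init(
        description: "Example database credentials",
        name: "example/app/db-credentials",
        secretString: #"{"username": "appuser", "password": "example-password"}"#
    ))
    print("Created secret:", created.arn ?? "-")

    // PutSecretValue adds a new version and moves AWSCURRENT to it. Keep calls to a sustained rate
    // of no more than once every 10 minutes so outdated versions can be pruned before the
    // version quota is reached.
    _ = try await secretsManager.putSecretValue(.init(
        secretId: "example/app/db-credentials",
        secretString: #"{"username": "appuser", "password": "rotated-password"}"#
    ))
}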
If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information, see Secret encryption and decryption. + /// Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue. To change the rotation configuration of a secret, use RotateSecret instead. To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services. We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information, see Secret encryption and decryption. When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. 
This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets. @Sendable public func updateSecret(_ input: UpdateSecretRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSecretResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift b/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift index 1aa2aad379..5799580b2d 100644 --- a/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift +++ b/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift @@ -1401,7 +1401,7 @@ extension SecretsManager { public let clientRequestToken: String? /// The description of the secret. public let description: String? - /// The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version. A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts. + /// The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version. A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. 
The user making the call must have permissions to both the secret and the KMS key in their respective accounts. public let kmsKeyId: String? /// The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. You can't access this parameter in the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretBinary: AWSBase64Data? diff --git a/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift b/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift index 1e35f7ab4c..3b5e895b98 100644 --- a/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift +++ b/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift @@ -33,7 +33,6 @@ public struct TaxSettings: AWSService { /// Initialize the TaxSettings client /// - parameters: /// - client: AWSClient used to process requests - /// - region: Region of server you want to communicate with. This will override the partition parameter. /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). /// - endpoint: Custom endpoint URL to use instead of standard AWS servers /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded @@ -42,7 +41,6 @@ public struct TaxSettings: AWSService { /// - options: Service options public init( client: AWSClient, - region: SotoCore.Region? = nil, partition: AWSPartition = .aws, endpoint: String? = nil, middleware: AWSMiddlewareProtocol? = nil, @@ -52,13 +50,15 @@ public struct TaxSettings: AWSService { ) { self.client = client self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? 
partition, + region: nil, + partition: partition, serviceName: "TaxSettings", serviceIdentifier: "tax", serviceProtocol: .restjson, apiVersion: "2018-05-10", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + partitionEndpoints: Self.partitionEndpoints, errorType: TaxSettingsErrorType.self, middleware: middleware, timeout: timeout, @@ -68,7 +68,15 @@ public struct TaxSettings: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "aws-global": "tax.us-east-1.amazonaws.com" + ]} + /// Default endpoint and region to use for each partition + static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[ + .aws: (endpoint: "aws-global", region: .useast1) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/Textract/Textract_api.swift b/Sources/Soto/Services/Textract/Textract_api.swift index 1054f20207..76f257b153 100644 --- a/Sources/Soto/Services/Textract/Textract_api.swift +++ b/Sources/Soto/Services/Textract/Textract_api.swift @@ -74,6 +74,32 @@ public struct Textract: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "ap-northeast-2": "textract.ap-northeast-2.api.aws", + "ap-south-1": "textract.ap-south-1.api.aws", + "ap-southeast-1": "textract.ap-southeast-1.api.aws", + "ap-southeast-2": "textract.ap-southeast-2.api.aws", + "ca-central-1": "textract.ca-central-1.api.aws", + "eu-central-1": "textract.eu-central-1.api.aws", + "eu-west-1": "textract.eu-west-1.api.aws", + "eu-west-2": "textract.eu-west-2.api.aws", + "eu-west-3": "textract.eu-west-3.api.aws", + "us-east-1": "textract.us-east-1.api.aws", + "us-east-2": "textract.us-east-2.api.aws", + "us-gov-east-1": "textract.us-gov-east-1.api.aws", + "us-gov-west-1": "textract.us-gov-west-1.api.aws", + "us-west-1": "textract.us-west-1.api.aws", + "us-west-2": "textract.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "textract-fips.ca-central-1.api.aws", + "us-east-1": "textract-fips.us-east-1.api.aws", + "us-east-2": "textract-fips.us-east-2.api.aws", + "us-gov-east-1": "textract-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "textract-fips.us-gov-west-1.api.aws", + "us-west-1": "textract-fips.us-west-1.api.aws", + "us-west-2": "textract-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "textract-fips.ca-central-1.amazonaws.com", "us-east-1": "textract-fips.us-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/TimestreamQuery/TimestreamQuery_shapes.swift b/Sources/Soto/Services/TimestreamQuery/TimestreamQuery_shapes.swift index 864b63718d..00c2101c27 100644 --- a/Sources/Soto/Services/TimestreamQuery/TimestreamQuery_shapes.swift +++ b/Sources/Soto/Services/TimestreamQuery/TimestreamQuery_shapes.swift @@ -1279,7 +1279,7 @@ extension TimestreamQuery { public struct UpdateAccountSettingsRequest: AWSEncodableShape { /// The maximum number of compute units the service will use at any point in time to serve your queries. To run queries, you must set a minimum capacity of 4 TCU. You can set the maximum number of TCU in multiples of 4, for example, 4, 8, 16, 32, and so on. The maximum value supported for MaxQueryTCU is 1000. To request an increase to this soft limit, contact Amazon Web Services Support. For information about the default quota for maxQueryTCU, see Default quotas. public let maxQueryTCU: Int? - /// The pricing model for queries in an account. 
+ /// The pricing model for queries in an account. The QueryPricingModel parameter is used by several Timestream operations; however, the UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS. public let queryPricingModel: QueryPricingModel? public init(maxQueryTCU: Int? = nil, queryPricingModel: QueryPricingModel? = nil) { diff --git a/Sources/Soto/Services/Translate/Translate_api.swift b/Sources/Soto/Services/Translate/Translate_api.swift index 776aa08ae7..6bbb3f8f9a 100644 --- a/Sources/Soto/Services/Translate/Translate_api.swift +++ b/Sources/Soto/Services/Translate/Translate_api.swift @@ -78,6 +78,7 @@ public struct Translate: AWSService { "us-east-1": "translate-fips.us-east-1.amazonaws.com", "us-east-2": "translate-fips.us-east-2.amazonaws.com", "us-gov-west-1": "translate-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "translate-fips.us-west-1.amazonaws.com", "us-west-2": "translate-fips.us-west-2.amazonaws.com" ]) ]} diff --git a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift index 8524db2acd..069c7c21b2 100644 --- a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift +++ b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift @@ -479,7 +479,7 @@ extension WorkSpacesThinClient { public let desktopEndpoint: String? /// The type of streaming desktop for the environment. public let desktopType: DesktopType? - /// "The tag keys and optional values for the newly created devices for this environment." + /// The tag keys and optional values for the newly created devices for this environment. public let deviceCreationTags: [String: String]? /// The ID of the environment. public let id: String? diff --git a/models/acm-pca.json b/models/acm-pca.json index eef153c64d..4c2a880b7f 100644 --- a/models/acm-pca.json +++ b/models/acm-pca.json @@ -1,37 +1,8 @@ { "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, "shapes": { "com.amazonaws.acmpca#ACMPrivateCA": { "type": "service", - "version": "2017-08-22", "operations": [ { "target": "com.amazonaws.acmpca#CreateCertificateAuthority" @@ -109,6 +80,7 @@ "arnNamespace": "acm-pca", "cloudFormationName": "ACMPCA", "cloudTrailEventSource": "acm-pca.amazonaws.com", + "docId": "acm-pca-2017-08-22", "endpointPrefix": "acm-pca" }, "aws.auth#sigv4": { @@ -1223,7 +1195,7 @@ "CustomAttributes": { "target": "com.amazonaws.acmpca#CustomAttributeList", "traits": { - "smithy.api#documentation": "
Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of which consists of an object identifier (OID) and a value. For more information, see NIST’s definition of Object Identifier (OID). Custom attributes cannot be used in combination with standard attributes.
" + "smithy.api#documentation": "Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of which consists of an object identifier (OID) and a value. For more information, see NIST’s definition of Object Identifier (OID). Custom attributes cannot be used in combination with standard attributes.
" } } }, @@ -2099,7 +2071,7 @@ "Value": { "target": "com.amazonaws.acmpca#String1To256", "traits": { - "smithy.api#documentation": "
Specifies the attribute value of relative distinguished name (RDN).
", + "smithy.api#documentation": "Specifies the attribute value of relative distinguished name (RDN).
", "smithy.api#required": {} } } @@ -2126,14 +2098,14 @@ "ObjectIdentifier": { "target": "com.amazonaws.acmpca#CustomObjectIdentifier", "traits": { - "smithy.api#documentation": "
Specifies the object identifier (OID) of the X.509 extension. For more information, see the Global OID reference database.
", + "smithy.api#documentation": "Specifies the object identifier (OID) of the X.509 extension. For more information, see the Global OID reference database.
", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.acmpca#Base64String1To4096", "traits": { - "smithy.api#documentation": "
Specifies the base64-encoded value of the X.509 extension.
", + "smithy.api#documentation": "Specifies the base64-encoded value of the X.509 extension.
", "smithy.api#required": {} } }, @@ -2141,12 +2113,12 @@ "target": "com.amazonaws.acmpca#Boolean", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "
Specifies the critical flag of the X.509 extension.
" + "smithy.api#documentation": "Specifies the critical flag of the X.509 extension.
" } } }, "traits": { - "smithy.api#documentation": "
Specifies the X.509 extension information for a certificate. Extensions present in CustomExtensions follow the ApiPassthrough template rules.
" + "smithy.api#documentation": "Specifies the X.509 extension information for a certificate. Extensions present in CustomExtensions follow the ApiPassthrough template rules.
" } }, "com.amazonaws.acmpca#CustomExtensionList": { @@ -2394,7 +2366,8 @@ } } ], - "minDelay": 3 + "minDelay": 3, + "maxDelay": 180 } } } @@ -2621,7 +2594,7 @@ "CustomExtensions": { "target": "com.amazonaws.acmpca#CustomExtensionList", "traits": { - "smithy.api#documentation": "
Contains a sequence of one or more X.509 extensions, each of which consists of an object identifier (OID), a base64-encoded value, and the critical flag. For more information, see the Global OID reference database.
" + "smithy.api#documentation": "Contains a sequence of one or more X.509 extensions, each of which consists of an object identifier (OID), a base64-encoded value, and the critical flag. For more information, see the Global OID reference database.
" } } }, @@ -2770,7 +2743,8 @@ } } ], - "minDelay": 1 + "minDelay": 1, + "maxDelay": 60 } } } @@ -2886,7 +2860,8 @@ } } ], - "minDelay": 3 + "minDelay": 3, + "maxDelay": 180 } } } @@ -3065,7 +3040,7 @@ } ], "traits": { - "smithy.api#documentation": "
Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must in place: In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. Amazon Web Services Private CA supports three scenarios for installing a CA certificate: Installing a certificate for a root CA hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is externally hosted. The following additional requirements apply when you import a CA certificate. Only a self-signed certificate can be imported as a root CA. A self-signed certificate cannot be imported as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions.
" + "smithy.api#documentation": "Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must in place: In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. Amazon Web Services Private CA supports three scenarios for installing a CA certificate: Installing a certificate for a root CA hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is externally hosted. The following additional requirements apply when you import a CA certificate. Only a self-signed certificate can be imported as a root CA. A self-signed certificate cannot be imported as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension
" } }, "com.amazonaws.acmpca#ImportCertificateAuthorityCertificateRequest": { @@ -3463,18 +3438,18 @@ "com.amazonaws.acmpca#ListCertificateAuthoritiesRequest": { "type": "structure", "members": { - "NextToken": { - "target": "com.amazonaws.acmpca#NextToken", - "traits": { - "smithy.api#documentation": "
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.
" - } - }, "MaxResults": { "target": "com.amazonaws.acmpca#MaxResults", "traits": { "smithy.api#documentation": "Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items. Although the maximum value is 1000, the action only returns a maximum of 100 items.
" } }, + "NextToken": { + "target": "com.amazonaws.acmpca#NextToken", + "traits": { + "smithy.api#documentation": "Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.
" + } + }, "ResourceOwner": { "target": "com.amazonaws.acmpca#ResourceOwner", "traits": { @@ -3489,17 +3464,17 @@ "com.amazonaws.acmpca#ListCertificateAuthoritiesResponse": { "type": "structure", "members": { - "CertificateAuthorities": { - "target": "com.amazonaws.acmpca#CertificateAuthorities", - "traits": { - "smithy.api#documentation": "
Summary information about each certificate authority you have created.
" - } - }, "NextToken": { "target": "com.amazonaws.acmpca#NextToken", "traits": { "smithy.api#documentation": "When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.
" } + }, + "CertificateAuthorities": { + "target": "com.amazonaws.acmpca#CertificateAuthorities", + "traits": { + "smithy.api#documentation": "Summary information about each certificate authority you have created.
" + } } }, "traits": { @@ -3544,11 +3519,10 @@ "com.amazonaws.acmpca#ListPermissionsRequest": { "type": "structure", "members": { - "CertificateAuthorityArn": { - "target": "com.amazonaws.acmpca#Arn", + "MaxResults": { + "target": "com.amazonaws.acmpca#MaxResults", "traits": { - "smithy.api#documentation": "
The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities action.
", - "smithy.api#required": {} + "smithy.api#documentation": "When paginating results, use this parameter to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.
" } }, "NextToken": { @@ -3557,10 +3531,11 @@ "smithy.api#documentation": "When paginating results, use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the response you just received.
" } }, - "MaxResults": { - "target": "com.amazonaws.acmpca#MaxResults", + "CertificateAuthorityArn": { + "target": "com.amazonaws.acmpca#Arn", "traits": { - "smithy.api#documentation": "When paginating results, use this parameter to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.
" + "smithy.api#documentation": "The Amazon Resource Number (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 You can get a private CA's ARN by running the ListCertificateAuthorities action.
", + "smithy.api#required": {} } } }, @@ -3571,17 +3546,17 @@ "com.amazonaws.acmpca#ListPermissionsResponse": { "type": "structure", "members": { - "Permissions": { - "target": "com.amazonaws.acmpca#PermissionList", - "traits": { - "smithy.api#documentation": "
Summary information about each permission assigned by the specified private CA, including the action enabled, the policy provided, and the time of creation.
" - } - }, "NextToken": { "target": "com.amazonaws.acmpca#NextToken", "traits": { "smithy.api#documentation": "When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.
" } + }, + "Permissions": { + "target": "com.amazonaws.acmpca#PermissionList", + "traits": { + "smithy.api#documentation": "Summary information about each permission assigned by the specified private CA, including the action enabled, the policy provided, and the time of creation.
" + } } }, "traits": { @@ -3620,11 +3595,10 @@ "com.amazonaws.acmpca#ListTagsRequest": { "type": "structure", "members": { - "CertificateAuthorityArn": { - "target": "com.amazonaws.acmpca#Arn", + "MaxResults": { + "target": "com.amazonaws.acmpca#MaxResults", "traits": { - "smithy.api#documentation": "
The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
", - "smithy.api#required": {} + "smithy.api#documentation": "Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.
" } }, "NextToken": { @@ -3633,10 +3607,11 @@ "smithy.api#documentation": "Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the response you just received.
" } }, - "MaxResults": { - "target": "com.amazonaws.acmpca#MaxResults", + "CertificateAuthorityArn": { + "target": "com.amazonaws.acmpca#Arn", "traits": { - "smithy.api#documentation": "Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
", + "smithy.api#required": {} } } }, @@ -3647,17 +3622,17 @@ "com.amazonaws.acmpca#ListTagsResponse": { "type": "structure", "members": { - "Tags": { - "target": "com.amazonaws.acmpca#TagList", - "traits": { - "smithy.api#documentation": "
The tags associated with your private CA.
" - } - }, "NextToken": { "target": "com.amazonaws.acmpca#NextToken", "traits": { "smithy.api#documentation": "When the list is truncated, this value is present and should be used for the NextToken parameter in a subsequent pagination request.
" } + }, + "Tags": { + "target": "com.amazonaws.acmpca#TagList", + "traits": { + "smithy.api#documentation": "The tags associated with your private CA.
" + } } }, "traits": { @@ -3963,7 +3938,7 @@ "ResourceArn": { "target": "com.amazonaws.acmpca#Arn", "traits": { - "smithy.api#documentation": "
The Amazon Resource Number (ARN) of the private CA to associate with the policy. The ARN of the CA can be found by calling the ListCertificateAuthorities action.
", + "smithy.api#documentation": "The Amazon Resource Number (ARN) of the private CA to associate with the policy. The ARN of the CA can be found by calling the ListCertificateAuthorities action.
", "smithy.api#required": {} } }, @@ -4709,4 +4684,4 @@ } } } -} +} \ No newline at end of file diff --git a/models/arc-zonal-shift.json b/models/arc-zonal-shift.json index e09f3dd19a..b592b485f3 100644 --- a/models/arc-zonal-shift.json +++ b/models/arc-zonal-shift.json @@ -94,14 +94,14 @@ "appliedStatus": { "target": "com.amazonaws.arczonalshift#AutoshiftAppliedStatus", "traits": { - "smithy.api#documentation": "
The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one traffic shift active. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, or an autoshift. The appliedStatus field for an autoshift for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an applied status set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run zonal shifts. For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED. The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts. For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#required": {} } }, "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "
The Availability Zone that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.
", + "smithy.api#documentation": "The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource, when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.
", "smithy.api#required": {} } }, @@ -117,6 +117,34 @@ "smithy.api#documentation": "
A complex structure that lists an autoshift that is currently active for a managed resource and information about the autoshift. For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.
" } }, + "com.amazonaws.arczonalshift#AutoshiftObserverNotification": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatus" + }, + { + "target": "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatus" + } + ] + }, + "com.amazonaws.arczonalshift#AutoshiftObserverNotificationStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.arczonalshift#AutoshiftSummaries": { "type": "list", "member": { @@ -129,7 +157,7 @@ "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "
The Availability Zone that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.
", + "smithy.api#documentation": "The Availability Zone (for example, use1-az1) that traffic is shifted away from for a resource when Amazon Web Services starts an autoshift. Until the autoshift ends, traffic for the resource is instead directed to other Availability Zones in the Amazon Web Services Region. An autoshift can end for a resource, for example, when Amazon Web Services ends the autoshift for the Availability Zone or when you disable zonal autoshift for the resource.
", "smithy.api#required": {} } }, @@ -159,6 +187,9 @@ "smithy.api#documentation": "
Information about an autoshift. Amazon Web Services starts an autoshift to temporarily move traffic for a resource away from an Availability Zone in an Amazon Web Services Region when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers. You can configure zonal autoshift in Route 53 ARC for managed resources in your Amazon Web Services account in a Region. Supported Amazon Web Services resources are automatically registered with Route 53 ARC. Autoshifts are temporary. When the Availability Zone recovers, Amazon Web Services ends the autoshift, and traffic for the resource is no longer directed to the other Availability Zones in the Region. You can stop an autoshift for a resource by disabling zonal autoshift.
" } }, + "com.amazonaws.arczonalshift#AutoshiftTriggerResource": { + "type": "resource" + }, "com.amazonaws.arczonalshift#AutoshiftsInResource": { "type": "list", "member": { @@ -311,34 +342,44 @@ } }, "com.amazonaws.arczonalshift#ConflictExceptionReason": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ZonalShiftAlreadyExists", - "name": "ZONAL_SHIFT_ALREADY_EXISTS" - }, - { - "value": "ZonalShiftStatusNotActive", - "name": "ZONAL_SHIFT_STATUS_NOT_ACTIVE" - }, - { - "value": "SimultaneousZonalShiftsConflict", - "name": "SIMULTANEOUS_ZONAL_SHIFTS_CONFLICT" - }, - { - "value": "PracticeConfigurationAlreadyExists", - "name": "PRACTICE_CONFIGURATION_ALREADY_EXISTS" - }, - { - "value": "AutoShiftEnabled", - "name": "AUTOSHIFT_ENABLED" - }, - { - "value": "PracticeConfigurationDoesNotExist", - "name": "PRACTICE_CONFIGURATION_DOES_NOT_EXIST" + "type": "enum", + "members": { + "ZONAL_SHIFT_ALREADY_EXISTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZonalShiftAlreadyExists" } - ] + }, + "ZONAL_SHIFT_STATUS_NOT_ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZonalShiftStatusNotActive" + } + }, + "SIMULTANEOUS_ZONAL_SHIFTS_CONFLICT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SimultaneousZonalShiftsConflict" + } + }, + "PRACTICE_CONFIGURATION_ALREADY_EXISTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PracticeConfigurationAlreadyExists" + } + }, + "AUTOSHIFT_ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AutoShiftEnabled" + } + }, + "PRACTICE_CONFIGURATION_DOES_NOT_EXIST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PracticeConfigurationDoesNotExist" + } + } } }, "com.amazonaws.arczonalshift#ControlCondition": { @@ -347,20 +388,20 @@ "type": { "target": "com.amazonaws.arczonalshift#ControlConditionType", "traits": { - "smithy.api#documentation": "
The type of alarm specified for a practice run. The only valid value is CLOUDWATCH.
", + "smithy.api#documentation": "The type of alarm specified for a practice run. You can only specify Amazon CloudWatch alarms for practice runs, so the only valid value is CLOUDWATCH.
", "smithy.api#required": {} } }, "alarmIdentifier": { "target": "com.amazonaws.arczonalshift#ResourceArn", "traits": { - "smithy.api#documentation": "
The Amazon Resource Name (ARN) for the Amazon CloudWatch alarm that you specify as a control condition for a practice run.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "
A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting. Control condition alarms do not apply for autoshifts. For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
" + "smithy.api#documentation": "A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting or to interrupt a practice run in progress. Control condition alarms do not apply for autoshifts. For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
" } }, "com.amazonaws.arczonalshift#ControlConditionType": { @@ -415,7 +456,7 @@ } ], "traits": { - "smithy.api#documentation": "
A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting. For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting. When a resource has a practice run configuration, Route 53 ARC starts zonal shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application. For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/configuration", @@ -429,7 +470,7 @@ "resourceIdentifier": { "target": "com.amazonaws.arczonalshift#ResourceIdentifier", "traits": { - "smithy.api#documentation": "
The identifier of the resource to shift away traffic for when a practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
", + "smithy.api#documentation": "The identifier of the resource that Amazon Web Services shifts traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
", "smithy.api#required": {} } }, @@ -488,7 +529,7 @@ "zonalAutoshiftStatus": { "target": "com.amazonaws.arczonalshift#ZonalAutoshiftStatus", "traits": { - "smithy.api#documentation": "
The status for zonal autoshift for a resource. When you specify the autoshift status as ENABLED, Amazon Web Services shifts traffic away from shifts away application resource traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers. When you enable zonal autoshift, you must also configure practice runs for the resource.
", + "smithy.api#documentation": "The status for zonal autoshift for a resource. When you specify ENABLED for the autoshift status, Amazon Web Services shifts traffic away from shifts away application resource traffic from an Availability Zone, on your behalf, when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers. When you enable zonal autoshift, you must also configure practice runs for the resource.
", "smithy.api#required": {} } }, @@ -605,6 +646,62 @@ "com.amazonaws.arczonalshift#ExpiryTime": { "type": "timestamp" }, + "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatusRequest" + }, + "output": { + "target": "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.arczonalshift#AccessDeniedException" + }, + { + "target": "com.amazonaws.arczonalshift#InternalServerException" + }, + { + "target": "com.amazonaws.arczonalshift#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "
Returns the status of autoshift observer notification. Autoshift observer notification enables you to be notified, through Amazon EventBridge, when there is an autoshift event for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift. For more information, see Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#http": { + "method": "GET", + "uri": "/autoshift-observer-notification", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatusRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.arczonalshift#AutoshiftObserverNotification" + } + ] + } + }, + "com.amazonaws.arczonalshift#GetAutoshiftObserverNotificationStatusResponse": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.arczonalshift#AutoshiftObserverNotificationStatus", + "traits": { + "smithy.api#documentation": "
The status of autoshift observer notification. If the status is ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.arczonalshift#GetManagedResource": { "type": "operation", "input": { @@ -646,7 +743,7 @@ "resourceIdentifier": { "target": "com.amazonaws.arczonalshift#ResourceIdentifier", "traits": { - "smithy.api#documentation": "
The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
", + "smithy.api#documentation": "The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -749,7 +846,7 @@ } ], "traits": { - "smithy.api#documentation": "
Returns the active autoshifts for a specified resource.
", + "smithy.api#documentation": "Returns a list of autoshifts for an Amazon Web Services Region. By default, the call returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return COMPLETED autoshifts.
", "smithy.api#http": { "method": "GET", "uri": "/autoshifts", @@ -917,7 +1014,7 @@ } ], "traits": { - "smithy.api#documentation": "
Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ListAutoshifts.
", + "smithy.api#documentation": "Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation does not list autoshifts. For more information about listing autoshifts, see ListAutoshifts.
", "smithy.api#http": { "method": "GET", "uri": "/zonalshifts", @@ -1092,6 +1189,12 @@ { "target": "com.amazonaws.arczonalshift#Autoshift" }, + { + "target": "com.amazonaws.arczonalshift#AutoshiftObserverNotification" + }, + { + "target": "com.amazonaws.arczonalshift#AutoshiftTriggerResource" + }, { "target": "com.amazonaws.arczonalshift#ManagedResource" }, @@ -1137,7 +1240,7 @@ "Date" ] }, - "smithy.api#documentation": "
Welcome to the Zonal Shift API Reference Guide for Amazon Route 53 Application Recovery Controller (Route 53 ARC). You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can also configure zonal autoshift for a load balancer resource. Zonal autoshift is a capability in Route 53 ARC where Amazon Web Services shifts away application resource traffic from an Availability Zone, on your behalf, to help reduce your time to recovery during events. Amazon Web Services shifts away traffic for resources that are enabled for zonal autoshift whenever Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers. To ensure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource out of an Availability Zone. Practice runs make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone. You must prescale resource capacity in all Availability Zones in the Region where your application is deployed, before you configure practice runs or enable zonal autoshift for a resource. You should not rely on scaling on demand when an autoshift or practice run starts. For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (Route 53 ARC). You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift is a capability in Route 53 ARC where you authorize Amazon Web Services to shift away application resource traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers. To help make sure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. Practice runs help you to make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone. Before you configure practice runs or enable zonal autoshift, we strongly recommend that you prescale your application resource capacity in all Availability Zones in the Region where your application resources are deployed. You should not rely on scaling on demand when an autoshift or practice run starts. Zonal autoshift, including practice runs, works independently, and does not wait for auto scaling actions to complete. Relying on auto scaling, instead of pre-scaling, can result in loss of availability. If you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure the minimum capacity of your auto scaling to continue operating normally with the loss of an Availability Zone. Be aware that Route 53 ARC does not inspect the health of individual resources. Amazon Web Services only starts an autoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could potentially impact customers. In some cases, resources might be shifted away that are not experiencing impact. For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#title": "AWS ARC - Zonal Shift", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1836,7 +1939,7 @@ } }, "traits": { - "smithy.api#documentation": "

A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice\n\t\trun, as well as any blocked dates or blocked windows for the practice run.

\n

You can update or delete a practice run configuration. Before you delete a practice run configuration, you\n\t\t\tmust disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.

" + "smithy.api#documentation": "

A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice\n\t\trun, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run \n\t\tconfiguration, Route 53 ARC shifts traffic for the resource weekly for practice runs.

\n

Practice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that \n\t\t\tshifting away traffic from an Availability Zone during an autoshift is safe for your application.

\n

You can update or delete a practice run configuration. Before you delete a practice run configuration, you\n\t\t\tmust disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.

" } }, "com.amazonaws.arczonalshift#PracticeRunConfigurationResource": { @@ -1982,14 +2085,14 @@ "resourceIdentifier": { "target": "com.amazonaws.arczonalshift#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

\n

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", + "smithy.api#documentation": "

The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

\n

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", "smithy.api#required": {} } }, "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", + "smithy.api#documentation": "

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", "smithy.api#required": {} } }, @@ -2030,6 +2133,73 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatusRequest" + }, + "output": { + "target": "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.arczonalshift#AccessDeniedException" + }, + { + "target": "com.amazonaws.arczonalshift#InternalServerException" + }, + { + "target": "com.amazonaws.arczonalshift#ThrottlingException" + }, + { + "target": "com.amazonaws.arczonalshift#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update the status of autoshift observer notification. Autoshift observer\n\t\t\tnotification enables you to be notified, through Amazon EventBridge, when\n\t\t\tthere is an autoshift event for zonal autoshift.

\n

If the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.

\n

For more information, see \n\t\t\t\n\t\t\t\tNotifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/autoshift-observer-notification", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatusRequest": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.arczonalshift#AutoshiftObserverNotificationStatus", + "traits": { + "smithy.api#documentation": "

The status to set for autoshift observer notification. If the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.arczonalshift#AutoshiftObserverNotification" + } + ] + } + }, + "com.amazonaws.arczonalshift#UpdateAutoshiftObserverNotificationStatusResponse": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.arczonalshift#AutoshiftObserverNotificationStatus", + "traits": { + "smithy.api#documentation": "

The status for autoshift observer notification.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.arczonalshift#UpdatePracticeRunConfiguration": { "type": "operation", "input": { @@ -2177,7 +2347,7 @@ } ], "traits": { - "smithy.api#documentation": "

You can update the zonal autoshift status for a resource, to enable or disable zonal\n\t\t\tautoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away \n\t\t\tresource traffic from an Availability Zone, on your behalf, when Amazon Web Services\n\t\t\tdetermines that there's an issue in the Availability Zone that could potentially affect customers.

", + "smithy.api#documentation": "

The zonal autoshift configuration for a resource includes the practice run configuration and the status for\n\t\t\trunning autoshifts, the zonal autoshift status. When a resource has a practice run configuration, Route 53 ARC \n\t\t\tstarts weekly zonal shifts for the resource, to shift traffic away from an Availability Zone. Weekly practice\n\t\t\truns help you to make sure that your application can continue to operate normally with the loss of one Availability Zone.

\n

You can update the zonal autoshift status to enable or disable zonal autoshift. When zonal\n\t\t\tautoshift is ENABLED, you authorize Amazon Web Services to shift away resource traffic for \n\t\t\tan application from an Availability Zone during events, on your behalf, to help reduce time to recovery.\n\t\t\tTraffic is also shifted away for the required weekly practice runs.

", "smithy.api#http": { "method": "PUT", "uri": "/managedresources/{resourceIdentifier}", @@ -2200,7 +2370,7 @@ "zonalAutoshiftStatus": { "target": "com.amazonaws.arczonalshift#ZonalAutoshiftStatus", "traits": { - "smithy.api#documentation": "

The zonal autoshift status for the resource that you want to update the zonal\n\t\t\tautoshift configuration for.

", + "smithy.api#documentation": "

The zonal autoshift status for the resource that you want to update the zonal\n\t\t\tautoshift configuration for. Choose ENABLED to authorize Amazon Web Services \n\t\t\tto shift away resource traffic for an application from an Availability Zone during events, \n\t\t\ton your behalf, to help reduce time to recovery.

", "smithy.api#required": {} } } @@ -2222,7 +2392,7 @@ "zonalAutoshiftStatus": { "target": "com.amazonaws.arczonalshift#ZonalAutoshiftStatus", "traits": { - "smithy.api#documentation": "

The zonal autoshift status for the resource that you updated the zonal\n\t\t\tautoshift configuration for.

", + "smithy.api#documentation": "

The updated zonal autoshift status for the resource.

", "smithy.api#required": {} } } @@ -2410,21 +2580,21 @@ "resourceIdentifier": { "target": "com.amazonaws.arczonalshift#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

The identifier for the resource to shift away traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

\n

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", + "smithy.api#documentation": "

The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

\n

At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

", "smithy.api#required": {} } }, "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. \n\t\t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", + "smithy.api#documentation": "

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. \n\t\t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", "smithy.api#required": {} } }, "expiryTime": { "target": "com.amazonaws.arczonalshift#ExpiryTime", "traits": { - "smithy.api#documentation": "

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n\t\t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n\t\t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n\t\t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n\t\t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", + "smithy.api#documentation": "

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n\t\t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n\t\t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n\t\t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n\t\t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", "smithy.api#required": {} } }, @@ -2475,7 +2645,7 @@ "appliedStatus": { "target": "com.amazonaws.arczonalshift#AppliedStatus", "traits": { - "smithy.api#documentation": "

The appliedStatus field specifies which application traffic shift is in effect for a\n \t\tresource when there is more than one traffic shift active. There can be more than one application traffic \n \t\tshift in progress at the same time - that is, practice run zonal shifts, customer-started zonal shifts, \n \t\tor an autoshift. The appliedStatus field for an autoshift for a resource can have one of two \n \t\tvalues: APPLIED or NOT_APPLIED. The zonal shift or autoshift \n \t\tthat is currently in effect for the resource has an applied status set to APPLIED.

\n

The overall principle for precedence is that zonal shifts that you start as a customer take precedence \n \t\tautoshifts, which take precedence over practice runs. That is, customer-started zonal shifts > autoshifts > practice run \n \t\tzonal shifts.

\n

For more information, see \n \t\tHow zonal autoshift \n \t\t\tand practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

", + "smithy.api#documentation": "

The appliedStatus field specifies which application traffic shift is in effect for a\n \t\tresource when there is more than one active traffic shift. There can be more than one application traffic \n \t\tshift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, \n \t\tor an autoshift. The appliedStatus field for a shift that is in progress for a resource can \n \t\thave one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift \n \t\tthat is currently in effect for the resource has an appliedStatus set to APPLIED.

\n

The overall principle for precedence is that zonal shifts that you start as a customer take precedence \n \t\tover autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run \n \t\tzonal shifts.

\n

For more information, see \n \t\tHow zonal autoshift \n \t\t\tand practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

", "smithy.api#required": {} } }, @@ -2496,14 +2666,14 @@ "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", + "smithy.api#documentation": "

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", "smithy.api#required": {} } }, "expiryTime": { "target": "com.amazonaws.arczonalshift#ExpiryTime", "traits": { - "smithy.api#documentation": "

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", + "smithy.api#documentation": "

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", "smithy.api#required": {} } }, @@ -2517,7 +2687,7 @@ "comment": { "target": "com.amazonaws.arczonalshift#ZonalShiftComment", "traits": { - "smithy.api#documentation": "

A comment that you enter about the zonal shift. Only the latest comment is retained; no comment\n \t\thistory is maintained. That is, a new comment overwrites any existing comment string.

", + "smithy.api#documentation": "

A comment that you enter for a customer-initiated zonal shift. Only the latest comment is retained; no comment\n \t\thistory is maintained. That is, a new comment overwrites any existing comment string.

", "smithy.api#required": {} } }, @@ -2597,14 +2767,14 @@ "awayFrom": { "target": "com.amazonaws.arczonalshift#AvailabilityZone", "traits": { - "smithy.api#documentation": "

The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", + "smithy.api#documentation": "

The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. \n \t\tUntil the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

", "smithy.api#required": {} } }, "expiryTime": { "target": "com.amazonaws.arczonalshift#ExpiryTime", "traits": { - "smithy.api#documentation": "

The expiry time (expiration time) for a customer-started zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", + "smithy.api#documentation": "

The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.

\n

When you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

", "smithy.api#required": {} } }, @@ -2637,7 +2807,7 @@ } }, "traits": { - "smithy.api#documentation": "

Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that Route 53 ARC starts\n \t\ton your behalf for practice runs with zonal autoshift.

\n

Zonal shifts are temporary, including customer-started zonal shifts and the zonal autoshift practice run zonal shifts that\n \t\tRoute 53 ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A\n \t\tpractice run zonal shift has a 30 minute duration.

" + "smithy.api#documentation": "

Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that Route 53 ARC starts\n \t\ton your behalf for practice runs with zonal autoshift.

\n

Zonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that\n \t\tRoute 53 ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A\n \t\tpractice run zonal shift has a 30-minute duration.

" } }, "com.amazonaws.arczonalshift#ZonalShifts": { diff --git a/models/auto-scaling.json b/models/auto-scaling.json index e9c3eaaa56..d6d9e29ef2 100644 --- a/models/auto-scaling.json +++ b/models/auto-scaling.json @@ -4954,6 +4954,19 @@ ] } } + ], + "smithy.test#smokeTests": [ + { + "id": "DescribeScalingProcessTypesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } ] } }, diff --git a/models/batch.json b/models/batch.json index 2e47a6a446..d8b89bb51d 100644 --- a/models/batch.json +++ b/models/batch.json @@ -4024,6 +4024,12 @@ "smithy.api#documentation": "

The details for the init containers.

" } }, + "eksClusterArn": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon EKS cluster.

" + } + }, "podName": { "target": "com.amazonaws.batch#String", "traits": { @@ -6499,6 +6505,12 @@ "traits": { "smithy.api#documentation": "

An object that contains the instance types that you want to replace for the existing\n resources of a job.

" } + }, + "eksPropertiesOverride": { + "target": "com.amazonaws.batch#EksPropertiesOverride", + "traits": { + "smithy.api#documentation": "

An object that contains the properties that you want to replace for the existing Amazon EKS resources of a job.

" + } } }, "traits": { @@ -6545,6 +6557,12 @@ "traits": { "smithy.api#documentation": "

This is an object that represents the properties of the node range for a multi-node parallel\n job.

" } + }, + "eksProperties": { + "target": "com.amazonaws.batch#EksProperties", + "traits": { + "smithy.api#documentation": "

This is an object that represents the Amazon EKS properties of the node range for a multi-node parallel job.

" + } } }, "traits": { @@ -6629,14 +6647,16 @@ "smithy.api#documentation": "

Registers a Batch job definition.

", "smithy.api#examples": [ { - "title": "To register a job definition", - "documentation": "This example registers a job definition for a simple container job.", + "title": "RegisterJobDefinition with tags", + "documentation": "This demonstrates calling the RegisterJobDefinition action, including tags.", "input": { + "jobDefinitionName": "sleep30", + "type": "container", "containerProperties": { "image": "busybox", "command": [ "sleep", - "10" + "30" ], "resourceRequirements": [ { @@ -6649,26 +6669,26 @@ } ] }, - "type": "container", - "jobDefinitionName": "sleep10" + "tags": { + "Department": "Engineering", + "User": "JaneDoe" + } }, "output": { - "jobDefinitionName": "sleep10", - "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep10:1", + "jobDefinitionName": "sleep30", + "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep30:1", "revision": 1 } }, { - "title": "RegisterJobDefinition with tags", - "documentation": "This demonstrates calling the RegisterJobDefinition action, including tags.", + "title": "To register a job definition", + "documentation": "This example registers a job definition for a simple container job.", "input": { - "jobDefinitionName": "sleep30", - "type": "container", "containerProperties": { "image": "busybox", "command": [ "sleep", - "30" + "10" ], "resourceRequirements": [ { @@ -6681,14 +6701,12 @@ } ] }, - "tags": { - "Department": "Engineering", - "User": "JaneDoe" - } + "type": "container", + "jobDefinitionName": "sleep10" }, "output": { - "jobDefinitionName": "sleep30", - "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep30:1", + "jobDefinitionName": "sleep10", + "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep10:1", "revision": 1 } } diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index f6881f207b..24ceb9b078 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -52,6 +52,18 @@ "traits": { "smithy.api#documentation": "

The function in the action group to call.

" } + }, + "executionType": { + "target": "com.amazonaws.bedrockagentruntime#ExecutionType", + "traits": { + "smithy.api#documentation": "

How fulfillment of the action is handled. For more information, see Handling fulfillment of the action.

" + } + }, + "invocationId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the invocation. Only returned if the executionType is RETURN_CONTROL.

" + } } }, "traits": { @@ -137,9 +149,15 @@ "type": "service", "version": "2023-07-26", "resources": [ + { + "target": "com.amazonaws.bedrockagentruntime#FlowResource" + }, { "target": "com.amazonaws.bedrockagentruntime#InferenceResource" }, + { + "target": "com.amazonaws.bedrockagentruntime#MemoryResource" + }, { "target": "com.amazonaws.bedrockagentruntime#RetrieveAndGenerateResource" }, @@ -1053,6 +1071,28 @@ "smithy.api#documentation": "

This property contains the document to chat with, along with its attributes.

" } }, + "com.amazonaws.bedrockagentruntime#ByteContentFile": { + "type": "structure", + "members": { + "mediaType": { + "target": "com.amazonaws.bedrockagentruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The MIME type of data contained in the file used for chat.

", + "smithy.api#required": {} + } + }, + "data": { + "target": "com.amazonaws.bedrockagentruntime#ByteContentBlob", + "traits": { + "smithy.api#documentation": "

The byte value of the file to attach, encoded as a Base64 string. The maximum total size of all attached files is 10 MB. You can attach a maximum of 5 files.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This property contains the file to chat with, along with its attributes.

" + } + }, "com.amazonaws.bedrockagentruntime#Citation": { "type": "structure", "members": { @@ -1079,6 +1119,58 @@ "target": "com.amazonaws.bedrockagentruntime#Citation" } }, + "com.amazonaws.bedrockagentruntime#CodeInterpreterInvocationInput": { + "type": "structure", + "members": { + "code": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The code for the code interpreter to use.

" + } + }, + "files": { + "target": "com.amazonaws.bedrockagentruntime#Files", + "traits": { + "smithy.api#documentation": "

Files that are uploaded for the code interpreter to use.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the code interpreter being invoked.

" + } + }, + "com.amazonaws.bedrockagentruntime#CodeInterpreterInvocationOutput": { + "type": "structure", + "members": { + "executionOutput": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Contains the successful output returned from code execution.

" + } + }, + "executionError": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Contains the error returned from code execution.

" + } + }, + "files": { + "target": "com.amazonaws.bedrockagentruntime#Files", + "traits": { + "smithy.api#documentation": "

Contains output files, if generated by code execution.

" + } + }, + "executionTimeout": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates if the execution of the code timed out.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the JSON-formatted string returned by the API invoked by the code interpreter.

" + } + }, "com.amazonaws.bedrockagentruntime#ConflictException": { "type": "structure", "members": { @@ -1138,6 +1230,98 @@ } } }, + "com.amazonaws.bedrockagentruntime#DateTimestamp": { + "type": "timestamp", + "traits": { + "smithy.api#documentation": "Time Stamp.", + "smithy.api#timestampFormat": "date-time" + } + }, + "com.amazonaws.bedrockagentruntime#DeleteAgentMemory": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagentruntime#DeleteAgentMemoryRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagentruntime#DeleteAgentMemoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagentruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#BadGatewayException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#DependencyFailedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes memory from the specified memory identifier.

", + "smithy.api#http": { + "code": 202, + "method": "DELETE", + "uri": "/agents/{agentId}/agentAliases/{agentAliasId}/memories" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.bedrockagentruntime#DeleteAgentMemoryRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagentruntime#AgentId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent to which the alias belongs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentAliasId": { + "target": "com.amazonaws.bedrockagentruntime#AgentAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of an alias of an agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "memoryId": { + "target": "com.amazonaws.bedrockagentruntime#MemoryId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the memory.

", + "smithy.api#httpQuery": "memoryId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagentruntime#DeleteAgentMemoryResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrockagentruntime#DependencyFailedException": { "type": "structure", "members": { @@ -1157,6 +1341,23 @@ "smithy.api#httpError": 424 } }, + "com.amazonaws.bedrockagentruntime#ExecutionType": { + "type": "enum", + "members": { + "LAMBDA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAMBDA" + } + }, + "RETURN_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RETURN_CONTROL" + } + } + } + }, "com.amazonaws.bedrockagentruntime#ExternalSource": { "type": "structure", "members": { @@ -1300,6 +1501,96 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#FileBody": { + "type": "blob", + "traits": { + "smithy.api#length": { + "max": 1000000 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FilePart": { + "type": "structure", + "members": { + "files": { + "target": "com.amazonaws.bedrockagentruntime#OutputFiles", + "traits": { + "smithy.api#documentation": "

Files containing intermediate response for the user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains intermediate response for code interpreter if any files have been generated.

" + } + }, + "com.amazonaws.bedrockagentruntime#FileSource": { + "type": "structure", + "members": { + "sourceType": { + "target": "com.amazonaws.bedrockagentruntime#FileSourceType", + "traits": { + "smithy.api#documentation": "

The source type of the files to attach.

", + "smithy.api#required": {} + } + }, + "s3Location": { + "target": "com.amazonaws.bedrockagentruntime#S3ObjectFile", + "traits": { + "smithy.api#documentation": "

The Amazon S3 location of the files to attach.

" + } + }, + "byteContent": { + "target": "com.amazonaws.bedrockagentruntime#ByteContentFile", + "traits": { + "smithy.api#documentation": "

The data and the text of the attached files.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The source file of the content contained in the wrapper object.

" + } + }, + "com.amazonaws.bedrockagentruntime#FileSourceType": { + "type": "enum", + "members": { + "S3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3" + } + }, + "BYTE_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BYTE_CONTENT" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#FileUseCase": { + "type": "enum", + "members": { + "CODE_INTERPRETER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CODE_INTERPRETER" + } + }, + "CHAT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CHAT" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#Files": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, "com.amazonaws.bedrockagentruntime#FilterAttribute": { "type": "structure", "members": { @@ -1354,46 +1645,274 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#Function": { + "com.amazonaws.bedrockagentruntime#FlowAliasIdentifier": { "type": "string", "traits": { - "smithy.api#sensitive": {} + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$" } }, - "com.amazonaws.bedrockagentruntime#FunctionInvocationInput": { + "com.amazonaws.bedrockagentruntime#FlowCompletionEvent": { "type": "structure", "members": { - "actionGroup": { - "target": "smithy.api#String", + "completionReason": { + "target": "com.amazonaws.bedrockagentruntime#FlowCompletionReason", "traits": { - "smithy.api#documentation": "

The action group that the function belongs to.

", + "smithy.api#documentation": "

The reason that the flow completed.

", "smithy.api#required": {} } - }, - "parameters": { - "target": "com.amazonaws.bedrockagentruntime#FunctionParameters", - "traits": { - "smithy.api#documentation": "

A list of parameters of the function.

" - } - }, - "function": { - "target": "smithy.api#String", + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about why a flow completed.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FlowCompletionReason": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the function.

" + "smithy.api#enumValue": "SUCCESS" } } - }, + } + }, + "com.amazonaws.bedrockagentruntime#FlowIdentifier": { + "type": "string", "traits": { - "smithy.api#documentation": "

Contains information about the function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$" } }, - "com.amazonaws.bedrockagentruntime#FunctionParameter": { + "com.amazonaws.bedrockagentruntime#FlowInput": { "type": "structure", "members": { - "name": { - "target": "smithy.api#String", + "nodeName": { + "target": "com.amazonaws.bedrockagentruntime#NodeName", "traits": { - "smithy.api#documentation": "

The name of the parameter.

" + "smithy.api#documentation": "

A name for the input of the flow input node.

", + "smithy.api#required": {} + } + }, + "nodeOutputName": { + "target": "com.amazonaws.bedrockagentruntime#NodeOutputName", + "traits": { + "smithy.api#documentation": "

A name for the output of the flow input node.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockagentruntime#FlowInputContent", + "traits": { + "smithy.api#documentation": "

Contains information about an input into the flow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about an input into the flow and what to do with it.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FlowInputContent": { + "type": "union", + "members": { + "document": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The input for the flow input node.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about an input into the flow.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FlowInputs": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#FlowInput" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.bedrockagentruntime#FlowOutputContent": { + "type": "union", + "members": { + "document": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

A name for the output of the flow.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the output node.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FlowOutputEvent": { + "type": "structure", + "members": { + "nodeName": { + "target": "com.amazonaws.bedrockagentruntime#NodeName", + "traits": { + "smithy.api#documentation": "

The name of the node to which input was provided.

", + "smithy.api#required": {} + } + }, + "nodeType": { + "target": "com.amazonaws.bedrockagentruntime#NodeType", + "traits": { + "smithy.api#documentation": "

The type of node to which input was provided.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockagentruntime#FlowOutputContent", + "traits": { + "smithy.api#documentation": "

The output of the node.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about an output from a flow invocation.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FlowResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockagentruntime#InvokeFlow" + } + ] + }, + "com.amazonaws.bedrockagentruntime#FlowResponseStream": { + "type": "union", + "members": { + "flowOutputEvent": { + "target": "com.amazonaws.bedrockagentruntime#FlowOutputEvent", + "traits": { + "smithy.api#documentation": "

Contains information about an output from a flow invocation.

" + } + }, + "flowCompletionEvent": { + "target": "com.amazonaws.bedrockagentruntime#FlowCompletionEvent", + "traits": { + "smithy.api#documentation": "

Contains information about why the flow completed.

" + } + }, + "internalServerException": { + "target": "com.amazonaws.bedrockagentruntime#InternalServerException", + "traits": { + "smithy.api#documentation": "

An internal server error occurred. Retry your request.

" + } + }, + "validationException": { + "target": "com.amazonaws.bedrockagentruntime#ValidationException", + "traits": { + "smithy.api#documentation": "

Input validation failed. Check your request parameters and retry the request.

" + } + }, + "resourceNotFoundException": { + "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException", + "traits": { + "smithy.api#documentation": "

The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again.

" + } + }, + "serviceQuotaExceededException": { + "target": "com.amazonaws.bedrockagentruntime#ServiceQuotaExceededException", + "traits": { + "smithy.api#documentation": "

The number of requests exceeds the service quota. Resubmit your request later.

" + } + }, + "throttlingException": { + "target": "com.amazonaws.bedrockagentruntime#ThrottlingException", + "traits": { + "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

" + } + }, + "accessDeniedException": { + "target": "com.amazonaws.bedrockagentruntime#AccessDeniedException", + "traits": { + "smithy.api#documentation": "

The request is denied because of missing access permissions. Check your permissions and retry your request.

" + } + }, + "conflictException": { + "target": "com.amazonaws.bedrockagentruntime#ConflictException", + "traits": { + "smithy.api#documentation": "

There was a conflict performing an operation. Resolve the conflict and retry your request.

" + } + }, + "dependencyFailedException": { + "target": "com.amazonaws.bedrockagentruntime#DependencyFailedException", + "traits": { + "smithy.api#documentation": "

There was an issue with a dependency. Check the resource configurations and retry the request.

" + } + }, + "badGatewayException": { + "target": "com.amazonaws.bedrockagentruntime#BadGatewayException", + "traits": { + "smithy.api#documentation": "

There was an issue with a dependency due to a server issue. Retry your request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The output of the flow.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#streaming": {} + } + }, + "com.amazonaws.bedrockagentruntime#Function": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FunctionInvocationInput": { + "type": "structure", + "members": { + "actionGroup": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The action group that the function belongs to.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.bedrockagentruntime#FunctionParameters", + "traits": { + "smithy.api#documentation": "

A list of parameters of the function.

" + } + }, + "function": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FunctionParameter": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the parameter.

" } }, "type": { @@ -1484,18 +2003,145 @@ "inferenceConfig": { "target": "com.amazonaws.bedrockagentruntime#InferenceConfig", "traits": { - "smithy.api#documentation": "

Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source.

" + "smithy.api#documentation": "

Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source.

" + } + }, + "additionalModelRequestFields": { + "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFields", + "traits": { + "smithy.api#documentation": "

Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#GetAgentMemory": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagentruntime#GetAgentMemoryRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagentruntime#GetAgentMemoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagentruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#BadGatewayException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#DependencyFailedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the sessions stored in the memory of the agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/agentAliases/{agentAliasId}/memories" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxItems", + "items": "memoryContents" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.bedrockagentruntime#GetAgentMemoryRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.bedrockagentruntime#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxItems value provided in the request, enter the token returned \n in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxItems": { + "target": "com.amazonaws.bedrockagentruntime#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return in the response. If the total number of results is greater \n than this value, use the token returned in the response in the nextToken field when making another \n request to return the next batch of results.

", + "smithy.api#httpQuery": "maxItems" + } + }, + "agentId": { + "target": "com.amazonaws.bedrockagentruntime#AgentId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent to which the alias belongs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentAliasId": { + "target": "com.amazonaws.bedrockagentruntime#AgentAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of an alias of an agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "memoryType": { + "target": "com.amazonaws.bedrockagentruntime#MemoryType", + "traits": { + "smithy.api#documentation": "

The type of memory.

", + "smithy.api#httpQuery": "memoryType", + "smithy.api#required": {} + } + }, + "memoryId": { + "target": "com.amazonaws.bedrockagentruntime#MemoryId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the memory.

", + "smithy.api#httpQuery": "memoryId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagentruntime#GetAgentMemoryResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.bedrockagentruntime#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxItems value provided in the request, use this token \n when making another request in the nextToken field to return the next batch of results.

" } }, - "additionalModelRequestFields": { - "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFields", + "memoryContents": { + "target": "com.amazonaws.bedrockagentruntime#Memories", "traits": { - "smithy.api#documentation": "

Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used.

" + "smithy.api#documentation": "

Contains details of the sessions stored in the memory.

" } } }, "traits": { - "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

\n

This data type is used in the following API operations:

\n " + "smithy.api#output": {} } }, "com.amazonaws.bedrockagentruntime#GuadrailAction": { @@ -2326,6 +2972,41 @@ } ] }, + "com.amazonaws.bedrockagentruntime#InputFile": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the source file.

", + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.bedrockagentruntime#FileSource", + "traits": { + "smithy.api#documentation": "

Specifies where the files are located.

", + "smithy.api#required": {} + } + }, + "useCase": { + "target": "com.amazonaws.bedrockagentruntime#FileUseCase", + "traits": { + "smithy.api#documentation": "

Specifies how the source files will be used by the code interpreter.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details of the source files.

" + } + }, + "com.amazonaws.bedrockagentruntime#InputFiles": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#InputFile" + } + }, "com.amazonaws.bedrockagentruntime#InputText": { "type": "string", "traits": { @@ -2374,6 +3055,12 @@ "traits": { "smithy.api#documentation": "

Contains details about the knowledge base to look up and the query to be made.

" } + }, + "codeInterpreterInvocationInput": { + "target": "com.amazonaws.bedrockagentruntime#CodeInterpreterInvocationInput", + "traits": { + "smithy.api#documentation": "

Contains information about the code interpreter to be invoked.

" + } } }, "traits": { @@ -2453,6 +3140,12 @@ "traits": { "smithy.api#enumValue": "FINISH" } + }, + "ACTION_GROUP_CODE_INTERPRETER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTION_GROUP_CODE_INTERPRETER" + } } } }, @@ -2494,7 +3187,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

The CLI doesn't support InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", + "smithy.api#documentation": "\n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -2552,6 +3245,12 @@ "traits": { "smithy.api#documentation": "

The prompt text to send the agent.

\n \n

If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored.

\n
" } + }, + "memoryId": { + "target": "com.amazonaws.bedrockagentruntime#MemoryId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent memory.

" + } } }, "traits": { @@ -2584,6 +3283,106 @@ "smithy.api#httpHeader": "x-amz-bedrock-agent-session-id", "smithy.api#required": {} } + }, + "memoryId": { + "target": "com.amazonaws.bedrockagentruntime#MemoryId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent memory.

", + "smithy.api#httpHeader": "x-amz-bedrock-agent-memory-id" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagentruntime#InvokeFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagentruntime#InvokeFlowRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagentruntime#InvokeFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagentruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#BadGatewayException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#DependencyFailedException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagentruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Invokes an alias of a flow to run the inputs that you specify and return the output of each node as a stream. If there's an error, the error is returned. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}" + } + } + }, + "com.amazonaws.bedrockagentruntime#InvokeFlowRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagentruntime#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "flowAliasIdentifier": { + "target": "com.amazonaws.bedrockagentruntime#FlowAliasIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow alias.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "inputs": { + "target": "com.amazonaws.bedrockagentruntime#FlowInputs", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing information about an input into the flow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagentruntime#InvokeFlowResponse": { + "type": "structure", + "members": { + "responseStream": { + "target": "com.amazonaws.bedrockagentruntime#FlowResponseStream", + "traits": { + "smithy.api#documentation": "

The output of the flow, returned as a stream. If there's an error, the error is returned.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } } }, "traits": { @@ -2600,6 +3399,39 @@ "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" } }, + "com.amazonaws.bedrockagentruntime#KnowledgeBaseConfiguration": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseId", + "traits": { + "smithy.api#documentation": "

The unique identifier for a knowledge base attached to the agent.

", + "smithy.api#required": {} + } + }, + "retrievalConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseRetrievalConfiguration", + "traits": { + "smithy.api#documentation": "

The configurations to apply to the knowledge base during query. For more information, see Query configurations.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configurations to apply to a knowledge base attached to the agent during query. For more information, see Knowledge base retrieval configurations.

" + } + }, + "com.amazonaws.bedrockagentruntime#KnowledgeBaseConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseConfiguration" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, "com.amazonaws.bedrockagentruntime#KnowledgeBaseId": { "type": "string", "traits": { @@ -2680,7 +3512,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains configurations for knowledge base query. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#KnowledgeBaseRetrievalResult": { @@ -2745,70 +3577,176 @@ "retrievalConfiguration": { "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseRetrievalConfiguration", "traits": { - "smithy.api#documentation": "

Contains configurations for how to retrieve and return the knowledge base query.

" + "smithy.api#documentation": "

Contains configurations for how to retrieve and return the knowledge base query.

" + } + }, + "generationConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#GenerationConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

" + } + }, + "orchestrationConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#OrchestrationConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for how the model processes the prompt prior to retrieval and generation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the resource being queried.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#KnowledgeBaseVectorSearchConfiguration": { + "type": "structure", + "members": { + "numberOfResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": 5, + "smithy.api#documentation": "

The number of source chunks to retrieve.

", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "overrideSearchType": { + "target": "com.amazonaws.bedrockagentruntime#SearchType", + "traits": { + "smithy.api#documentation": "

By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. For other vector store configurations, only SEMANTIC search is available. For more information, see Test a knowledge base.
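As a rough illustration of these settings in Soto's generated Swift types (a sketch: it assumes the usual memberwise initializers and that KnowledgeBaseRetrievalConfiguration exposes a vectorSearchConfiguration member, which is not shown in this diff):

    import SotoBedrockAgentRuntime

    // Retrieve up to 10 chunks and force HYBRID search instead of letting Bedrock choose.
    let retrievalConfiguration = BedrockAgentRuntime.KnowledgeBaseRetrievalConfiguration(
        vectorSearchConfiguration: .init(
            numberOfResults: 10,         // valid range 1-100; defaults to 5
            overrideSearchType: .hybrid  // assumed case name for "HYBRID"; use .semantic for vector-only search
        )
    )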

" + } + }, + "filter": { + "target": "com.amazonaws.bedrockagentruntime#RetrievalFilter", + "traits": { + "smithy.api#documentation": "

Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configurations for how to perform the search query and return results. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#LambdaArn": { + "type": "string" + }, + "com.amazonaws.bedrockagentruntime#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#documentation": "Max Results.", + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.bedrockagentruntime#MaxTokens": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 65536 + } + } + }, + "com.amazonaws.bedrockagentruntime#MaximumLength": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 4096 + } + } + }, + "com.amazonaws.bedrockagentruntime#Memories": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#Memory" + } + }, + "com.amazonaws.bedrockagentruntime#Memory": { + "type": "union", + "members": { + "sessionSummary": { + "target": "com.amazonaws.bedrockagentruntime#MemorySessionSummary", + "traits": { + "smithy.api#documentation": "

Contains a summary of a session.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains session summaries.

" + } + }, + "com.amazonaws.bedrockagentruntime#MemoryId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 100 + }, + "smithy.api#pattern": "^[0-9a-zA-Z._:-]+$" + } + }, + "com.amazonaws.bedrockagentruntime#MemoryResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockagentruntime#DeleteAgentMemory" + }, + { + "target": "com.amazonaws.bedrockagentruntime#GetAgentMemory" + } + ] + }, + "com.amazonaws.bedrockagentruntime#MemorySessionSummary": { + "type": "structure", + "members": { + "memoryId": { + "target": "com.amazonaws.bedrockagentruntime#MemoryId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the memory where the session summary is stored.

" } }, - "generationConfiguration": { - "target": "com.amazonaws.bedrockagentruntime#GenerationConfiguration", + "sessionId": { + "target": "com.amazonaws.bedrockagentruntime#SessionId", "traits": { - "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

" + "smithy.api#documentation": "

The identifier for this session.

" } - } - }, - "traits": { - "smithy.api#documentation": "

Contains details about the resource being queried.

\n

This data type is used in the following API operations:

\n " - } - }, - "com.amazonaws.bedrockagentruntime#KnowledgeBaseVectorSearchConfiguration": { - "type": "structure", - "members": { - "numberOfResults": { - "target": "smithy.api#Integer", + }, + "sessionStartTime": { + "target": "com.amazonaws.bedrockagentruntime#DateTimestamp", "traits": { - "smithy.api#addedDefault": {}, - "smithy.api#default": 5, - "smithy.api#documentation": "

The number of source chunks to retrieve.

", - "smithy.api#range": { - "min": 1, - "max": 100 - } + "smithy.api#documentation": "

The start time for this session.

" } }, - "overrideSearchType": { - "target": "com.amazonaws.bedrockagentruntime#SearchType", + "sessionExpiryTime": { + "target": "com.amazonaws.bedrockagentruntime#DateTimestamp", "traits": { - "smithy.api#documentation": "

By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. For other vector store configurations, only SEMANTIC search is available. For more information, see Test a knowledge base.

" + "smithy.api#documentation": "

The time when the memory duration for the session is set to end.

" } }, - "filter": { - "target": "com.amazonaws.bedrockagentruntime#RetrievalFilter", + "summaryText": { + "target": "com.amazonaws.bedrockagentruntime#SummaryText", "traits": { - "smithy.api#documentation": "

Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.

" + "smithy.api#documentation": "

The summarized text for this session.

" } } }, "traits": { - "smithy.api#documentation": "

Configurations for how to perform the search query and return results. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " - } - }, - "com.amazonaws.bedrockagentruntime#LambdaArn": { - "type": "string" - }, - "com.amazonaws.bedrockagentruntime#MaxTokens": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0, - "max": 65536 - } + "smithy.api#documentation": "

Contains details of a session summary.

" } }, - "com.amazonaws.bedrockagentruntime#MaximumLength": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0, - "max": 4096 + "com.amazonaws.bedrockagentruntime#MemoryType": { + "type": "enum", + "members": { + "SESSION_SUMMARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SESSION_SUMMARY" + } } } }, @@ -2876,6 +3814,65 @@ "smithy.api#pattern": "^\\S*$" } }, + "com.amazonaws.bedrockagentruntime#NodeName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" + } + }, + "com.amazonaws.bedrockagentruntime#NodeOutputName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" + } + }, + "com.amazonaws.bedrockagentruntime#NodeType": { + "type": "enum", + "members": { + "FLOW_INPUT_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FlowInputNode" + } + }, + "FLOW_OUTPUT_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FlowOutputNode" + } + }, + "LAMBDA_FUNCTION_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LambdaFunctionNode" + } + }, + "KNOWLEDGE_BASE_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "KnowledgeBaseNode" + } + }, + "PROMPT_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PromptNode" + } + }, + "CONDITION_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ConditionNode" + } + }, + "LEX_NODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LexNode" + } + } + } + }, "com.amazonaws.bedrockagentruntime#NonBlankString": { "type": "string", "traits": { @@ -2920,6 +3917,12 @@ "traits": { "smithy.api#documentation": "

Contains details about the response to reprompt the input.

" } + }, + "codeInterpreterInvocationOutput": { + "target": "com.amazonaws.bedrockagentruntime#CodeInterpreterInvocationOutput", + "traits": { + "smithy.api#documentation": "

Contains the JSON-formatted string returned by the API invoked by the code interpreter.

" + } } }, "traits": { @@ -2927,6 +3930,21 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#OrchestrationConfiguration": { + "type": "structure", + "members": { + "queryTransformationConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#QueryTransformationConfiguration", + "traits": { + "smithy.api#documentation": "

To split up the prompt and retrieve multiple sources, set the transformation type to\n QUERY_DECOMPOSITION.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for how the model processes the prompt prior to retrieval and generation.
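A minimal sketch of wiring this up through Soto's generated types (assuming the usual memberwise initializers and camelCased enum case names):

    import SotoBedrockAgentRuntime

    // Ask Bedrock to decompose the prompt into sub-queries before retrieval.
    let orchestration = BedrockAgentRuntime.OrchestrationConfiguration(
        queryTransformationConfiguration: .init(type: .queryDecomposition)
    )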

" + } + }, "com.amazonaws.bedrockagentruntime#OrchestrationTrace": { "type": "union", "members": { @@ -2960,6 +3978,45 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#OutputFile": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the file containing the response from the code interpreter.

" + } + }, + "type": { + "target": "com.amazonaws.bedrockagentruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The type of the file that contains the response from the code interpreter.

" + } + }, + "bytes": { + "target": "com.amazonaws.bedrockagentruntime#FileBody", + "traits": { + "smithy.api#documentation": "

The bytes of the file that contains the response from the code interpreter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details of the response from the code interpreter.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#OutputFiles": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#OutputFile" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.bedrockagentruntime#OutputString": { "type": "string", "traits": { @@ -3226,6 +4283,32 @@ "smithy.api#documentation": "

Contains the parameters in the request body.

" } }, + "com.amazonaws.bedrockagentruntime#QueryTransformationConfiguration": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockagentruntime#QueryTransformationType", + "traits": { + "smithy.api#documentation": "

The type of transformation to apply to the prompt.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

To split up the prompt and retrieve multiple sources, set the transformation type to\n QUERY_DECOMPOSITION.

" + } + }, + "com.amazonaws.bedrockagentruntime#QueryTransformationType": { + "type": "enum", + "members": { + "QUERY_DECOMPOSITION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUERY_DECOMPOSITION" + } + } + } + }, "com.amazonaws.bedrockagentruntime#RAGStopSequences": { "type": "list", "member": { @@ -3419,6 +4502,12 @@ "traits": { "smithy.api#documentation": "

There was an issue with a dependency due to a server issue. Retry your request.

" } + }, + "files": { + "target": "com.amazonaws.bedrockagentruntime#FilePart", + "traits": { + "smithy.api#documentation": "

Contains the intermediate response from the code interpreter if any files have been generated.

" + } } }, "traits": { @@ -3525,6 +4614,20 @@ } } }, + "com.amazonaws.bedrockagentruntime#RetrievalResultConfluenceLocation": { + "type": "structure", + "members": { + "url": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Confluence host URL for the data source location.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Confluence data source location.

" + } + }, "com.amazonaws.bedrockagentruntime#RetrievalResultContent": { "type": "structure", "members": { @@ -3547,19 +4650,43 @@ "type": { "target": "com.amazonaws.bedrockagentruntime#RetrievalResultLocationType", "traits": { - "smithy.api#documentation": "

The type of the location of the data source.

", + "smithy.api#documentation": "

The type of data source location.

", "smithy.api#required": {} } }, "s3Location": { "target": "com.amazonaws.bedrockagentruntime#RetrievalResultS3Location", "traits": { - "smithy.api#documentation": "

Contains the S3 location of the data source.

" + "smithy.api#documentation": "

The S3 data source location.

" + } + }, + "webLocation": { + "target": "com.amazonaws.bedrockagentruntime#RetrievalResultWebLocation", + "traits": { + "smithy.api#documentation": "

The web URL/URLs data source location.

" + } + }, + "confluenceLocation": { + "target": "com.amazonaws.bedrockagentruntime#RetrievalResultConfluenceLocation", + "traits": { + "smithy.api#documentation": "

The Confluence data source location.

" + } + }, + "salesforceLocation": { + "target": "com.amazonaws.bedrockagentruntime#RetrievalResultSalesforceLocation", + "traits": { + "smithy.api#documentation": "

The Salesforce data source location.

" + } + }, + "sharePointLocation": { + "target": "com.amazonaws.bedrockagentruntime#RetrievalResultSharePointLocation", + "traits": { + "smithy.api#documentation": "

The SharePoint data source location.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about the location of the data source.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains information about the data source location.
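Since only one of the location members is expected to be populated for a given result, client code can simply coalesce them. A small sketch against the generated Swift shapes, using only the member names defined above:

    import SotoBedrockAgentRuntime

    // Returns whichever source reference the retrieval result carries, if any.
    func sourceReference(for location: BedrockAgentRuntime.RetrievalResultLocation) -> String? {
        location.s3Location?.uri
            ?? location.webLocation?.url
            ?? location.confluenceLocation?.url
            ?? location.salesforceLocation?.url
            ?? location.sharePointLocation?.url
    }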

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -3571,6 +4698,30 @@ "traits": { "smithy.api#enumValue": "S3" } + }, + "WEB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WEB" + } + }, + "CONFLUENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONFLUENCE" + } + }, + "SALESFORCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SALESFORCE" + } + }, + "SHAREPOINT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHAREPOINT" + } } } }, @@ -3607,12 +4758,54 @@ "uri": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The S3 URI of the data source.

" + "smithy.api#documentation": "

The S3 URI for the data source location.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The S3 data source location.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#RetrievalResultSalesforceLocation": { + "type": "structure", + "members": { + "url": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Salesforce host URL for the data source location.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Salesforce data source location.

" + } + }, + "com.amazonaws.bedrockagentruntime#RetrievalResultSharePointLocation": { + "type": "structure", + "members": { + "url": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The SharePoint site URL for the data source location.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The SharePoint data source location.

" + } + }, + "com.amazonaws.bedrockagentruntime#RetrievalResultWebLocation": { + "type": "structure", + "members": { + "url": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The web URL/URLs for the data source location.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the S3 location of the data source.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

The web URL/URLs data source location.

" } }, "com.amazonaws.bedrockagentruntime#Retrieve": { @@ -3781,7 +4974,7 @@ "sessionId": { "target": "com.amazonaws.bedrockagentruntime#SessionId", "traits": { - "smithy.api#documentation": "

The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base.

" + "smithy.api#documentation": "

The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself.

" } }, "input": { @@ -3822,7 +5015,7 @@ "sessionId": { "target": "com.amazonaws.bedrockagentruntime#SessionId", "traits": { - "smithy.api#documentation": "

The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base.

", + "smithy.api#documentation": "

The unique identifier of the session. When you first make a RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must reuse this value for all subsequent requests in the same conversational session. This value allows Amazon Bedrock to maintain context and knowledge from previous interactions. You can't explicitly set the sessionId yourself.

", "smithy.api#required": {} } }, @@ -4026,6 +5219,21 @@ "smithy.api#documentation": "

The unique wrapper object of the document from the S3 location.

" } }, + "com.amazonaws.bedrockagentruntime#S3ObjectFile": { + "type": "structure", + "members": { + "uri": { + "target": "com.amazonaws.bedrockagentruntime#S3Uri", + "traits": { + "smithy.api#documentation": "

The URI of the S3 object.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details of the S3 object where the source file is located.

" + } + }, "com.amazonaws.bedrockagentruntime#S3Uri": { "type": "string", "traits": { @@ -4111,6 +5319,18 @@ "traits": { "smithy.api#documentation": "

The identifier of the invocation of an action. This value must match the invocationId returned in the InvokeAgent response for the action whose results are provided in the returnControlInvocationResults field. For more information, see Return control to the agent developer and Control session context.

" } + }, + "files": { + "target": "com.amazonaws.bedrockagentruntime#InputFiles", + "traits": { + "smithy.api#documentation": "

Contains information about the files used by the code interpreter.

" + } + }, + "knowledgeBaseConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseConfigurations", + "traits": { + "smithy.api#documentation": "

An array of configurations, each of which applies to a knowledge base attached to the agent.
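For example, an InvokeAgent caller could scope retrieval for one attached knowledge base as in the sketch below (assuming Soto's generated memberwise initializers; the knowledge base ID is a placeholder and vectorSearchConfiguration is assumed to be the retrieval configuration's member name):

    import SotoBedrockAgentRuntime

    let sessionState = BedrockAgentRuntime.SessionState(
        knowledgeBaseConfigurations: [
            .init(
                knowledgeBaseId: "KBID123456",   // placeholder knowledge base ID
                retrievalConfiguration: .init(
                    vectorSearchConfiguration: .init(numberOfResults: 3)
                )
            )
        ]
    )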

" + } } }, "traits": { @@ -4181,6 +5401,15 @@ } } }, + "com.amazonaws.bedrockagentruntime#SummaryText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 25000000 + } + } + }, "com.amazonaws.bedrockagentruntime#Temperature": { "type": "float", "traits": { diff --git a/models/bedrock-agent.json b/models/bedrock-agent.json index 9d4cbfb72a..5a40050c6b 100644 --- a/models/bedrock-agent.json +++ b/models/bedrock-agent.json @@ -82,6 +82,12 @@ "traits": { "smithy.api#enumValue": "AMAZON.UserInput" } + }, + "AMAZON_CODEINTERPRETER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMAZON.CodeInterpreter" + } } } }, @@ -278,7 +284,13 @@ "guardrailConfiguration": { "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

The guardrails configuration assigned to the agent.

" + "smithy.api#documentation": "

Details about the guardrail associated with the agent.

" + } + }, + "memoryConfiguration": { + "target": "com.amazonaws.bedrockagent#MemoryConfiguration", + "traits": { + "smithy.api#documentation": "

Contains memory configuration for the agent.

" } } }, @@ -664,6 +676,21 @@ "smithy.api#pattern": "^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:agent/[0-9a-zA-Z]{10}$" } }, + "com.amazonaws.bedrockagent#AgentFlowNodeConfiguration": { + "type": "structure", + "members": { + "agentAliasArn": { + "target": "com.amazonaws.bedrockagent#AgentAliasArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the alias of the agent to invoke.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines an agent node in your flow. You specify the agent to invoke at this point in the flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, "com.amazonaws.bedrockagent#AgentKnowledgeBase": { "type": "structure", "members": { @@ -909,7 +936,7 @@ "guardrailConfiguration": { "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

The details of the guardrails configuration in the agent summary.

" + "smithy.api#documentation": "

Details about the guardrail associated with the agent.

" } } }, @@ -1028,7 +1055,13 @@ "guardrailConfiguration": { "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

The guardrails configuration assigned to the agent version.

" + "smithy.api#documentation": "

Details about the guardrail associated with the agent.

" + } + }, + "memoryConfiguration": { + "target": "com.amazonaws.bedrockagent#MemoryConfiguration", + "traits": { + "smithy.api#documentation": "

\n Contains details of the memory configuration on the version of the agent. \n

" } } }, @@ -1094,7 +1127,7 @@ "guardrailConfiguration": { "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

The details of the guardrails configuration in the agent version summary.

" + "smithy.api#documentation": "

Details about the guardrail associated with the agent.

" } } }, @@ -1138,12 +1171,18 @@ { "target": "com.amazonaws.bedrockagent#DataSourceResource" }, + { + "target": "com.amazonaws.bedrockagent#FlowResource" + }, { "target": "com.amazonaws.bedrockagent#IngestionJobResource" }, { "target": "com.amazonaws.bedrockagent#KnowledgeBaseResource" }, + { + "target": "com.amazonaws.bedrockagent#PromptResource" + }, { "target": "com.amazonaws.bedrockagent#TaggingResource" }, @@ -1955,7 +1994,8 @@ "smithy.api#length": { "min": 1, "max": 100000 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.bedrockagent#BedrockEmbeddingModelArn": { @@ -1982,6 +2022,37 @@ "smithy.api#documentation": "

The vector configuration details for the Bedrock embeddings model.

" } }, + "com.amazonaws.bedrockagent#BedrockFoundationModelConfiguration": { + "type": "structure", + "members": { + "modelArn": { + "target": "com.amazonaws.bedrockagent#BedrockModelArn", + "traits": { + "smithy.api#documentation": "

The model's ARN.

", + "smithy.api#required": {} + } + }, + "parsingPrompt": { + "target": "com.amazonaws.bedrockagent#ParsingPrompt", + "traits": { + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a foundation model used to parse documents for a data source.

" + } + }, + "com.amazonaws.bedrockagent#BedrockModelArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})$" + } + }, "com.amazonaws.bedrockagent#BucketOwnerAccountId": { "type": "string", "traits": { @@ -1998,7 +2069,7 @@ "chunkingStrategy": { "target": "com.amazonaws.bedrockagent#ChunkingStrategy", "traits": { - "smithy.api#documentation": "

Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.

  • FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration.
  • NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.
", + "smithy.api#documentation": "

Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.

  • FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration.
  • HIERARCHICAL – Split documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.
  • SEMANTIC – Split documents into chunks based on groups of similar content derived with natural language processing.
  • NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.
", "smithy.api#required": {} } }, @@ -2007,6 +2078,18 @@ "traits": { "smithy.api#documentation": "

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.

" } + }, + "hierarchicalChunkingConfiguration": { + "target": "com.amazonaws.bedrockagent#HierarchicalChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.

" + } + }, + "semanticChunkingConfiguration": { + "target": "com.amazonaws.bedrockagent#SemanticChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing.
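A sketch of selecting this strategy through the generated Swift types; the SemanticChunkingConfiguration field names are assumptions (they are not shown in this diff) and the values are illustrative only:

    import SotoBedrockAgent

    let chunking = BedrockAgent.ChunkingConfiguration(
        chunkingStrategy: .semantic,
        semanticChunkingConfiguration: .init(
            breakpointPercentileThreshold: 95,  // assumed field: similarity breakpoint percentile
            bufferSize: 0,                      // assumed field: surrounding-sentence buffer
            maxTokens: 300                      // assumed field: upper bound per chunk
        )
    )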

" + } } }, "traits": { @@ -2027,6 +2110,18 @@ "traits": { "smithy.api#enumValue": "NONE" } + }, + "HIERARCHICAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HIERARCHICAL" + } + }, + "SEMANTIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEMANTIC" + } } } }, @@ -2037,7 +2132,14 @@ "min": 33, "max": 256 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}$" + } + }, + "com.amazonaws.bedrockagent#CollectorFlowNodeConfiguration": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Defines a collector node in your flow. This node takes an iteration of inputs and consolidates them into an array in the output. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" } }, "com.amazonaws.bedrockagent#ColumnName": { @@ -2049,6 +2151,21 @@ "smithy.api#pattern": "^[a-zA-Z0-9_\\-]+$" } }, + "com.amazonaws.bedrockagent#ConditionFlowNodeConfiguration": { + "type": "structure", + "members": { + "conditions": { + "target": "com.amazonaws.bedrockagent#FlowConditions", + "traits": { + "smithy.api#documentation": "

An array of conditions. Each member contains the name of a condition and an expression that defines the condition.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a condition node in your flow. You can specify conditions that determine which node comes next in the flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, "com.amazonaws.bedrockagent#ConflictException": { "type": "structure", "members": { @@ -2062,6 +2179,137 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.bedrockagent#ConfluenceAuthType": { + "type": "enum", + "members": { + "BASIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BASIC" + } + }, + "OAUTH2_CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OAUTH2_CLIENT_CREDENTIALS" + } + } + } + }, + "com.amazonaws.bedrockagent#ConfluenceCrawlerConfiguration": { + "type": "structure", + "members": { + "filterConfiguration": { + "target": "com.amazonaws.bedrockagent#CrawlFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of filtering the Confluence content. For example, configuring \n regular expression patterns to include or exclude certain content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the Confluence content. For example, configuring specific \n types of Confluence content.

" + } + }, + "com.amazonaws.bedrockagent#ConfluenceDataSourceConfiguration": { + "type": "structure", + "members": { + "sourceConfiguration": { + "target": "com.amazonaws.bedrockagent#ConfluenceSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The endpoint information to connect to your Confluence data source.

", + "smithy.api#required": {} + } + }, + "crawlerConfiguration": { + "target": "com.amazonaws.bedrockagent#ConfluenceCrawlerConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the Confluence content. For example, configuring \n specific types of Confluence content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Confluence as your data source.

" + } + }, + "com.amazonaws.bedrockagent#ConfluenceHostType": { + "type": "enum", + "members": { + "SAAS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SAAS" + } + } + } + }, + "com.amazonaws.bedrockagent#ConfluenceSourceConfiguration": { + "type": "structure", + "members": { + "hostUrl": { + "target": "com.amazonaws.bedrockagent#HttpsUrl", + "traits": { + "smithy.api#documentation": "

The Confluence host URL or instance URL.

", + "smithy.api#required": {} + } + }, + "hostType": { + "target": "com.amazonaws.bedrockagent#ConfluenceHostType", + "traits": { + "smithy.api#documentation": "

The supported host type, whether online/cloud or server/on-premises.

", + "smithy.api#required": {} + } + }, + "authType": { + "target": "com.amazonaws.bedrockagent#ConfluenceAuthType", + "traits": { + "smithy.api#documentation": "

The supported authentication type to authenticate and connect to your \n Confluence instance.

", + "smithy.api#required": {} + } + }, + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of a Secrets Manager secret that stores your authentication credentials for your Confluence instance. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see Confluence connection configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The endpoint information to connect to your Confluence data source.
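Putting the pieces above together in Soto's generated Swift types might look like the following sketch (enum case names follow Soto's usual camelCasing, which is an assumption here, and the ARN and URL values are placeholders):

    import SotoBedrockAgent

    let confluenceDataSource = BedrockAgent.ConfluenceDataSourceConfiguration(
        sourceConfiguration: .init(
            authType: .oauth2ClientCredentials,
            credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:confluence-credentials", // placeholder
            hostType: .saas,
            hostUrl: "https://example.atlassian.net"
        )
    )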

" + } + }, + "com.amazonaws.bedrockagent#CrawlFilterConfiguration": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockagent#CrawlFilterConfigurationType", + "traits": { + "smithy.api#documentation": "

The type of filtering that you want to apply to certain objects or content of the \n data source. For example, the PATTERN type is regular expression patterns \n you can apply to filter your content.

", + "smithy.api#required": {} + } + }, + "patternObjectFilter": { + "target": "com.amazonaws.bedrockagent#PatternObjectFilterConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of filtering certain objects or content types of the data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of filtering the data source content. For example, \n configuring regular expression patterns to include or exclude certain content.

" + } + }, + "com.amazonaws.bedrockagent#CrawlFilterConfigurationType": { + "type": "enum", + "members": { + "PATTERN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PATTERN" + } + } + } + }, "com.amazonaws.bedrockagent#CreateAgent": { "type": "operation", "input": { @@ -2091,7 +2339,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers.

  • Specify the following fields for security purposes.
      • agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent.
      • (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.
      • (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session.
  • To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts.
  • If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot.
", + "smithy.api#documentation": "

Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers.

  • Specify the following fields for security purposes.
      • agentResourceRoleArn – The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent.
      • (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.
      • (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session.
  • To enable your agent to retain conversational context across multiple sessions, include a memoryConfiguration object. For more information, see Configure memory.
  • To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts.
  • If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot.
", "smithy.api#http": { "code": 202, "method": "PUT", @@ -2135,7 +2383,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them.

\n

To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

", + "smithy.api#documentation": "

Creates an action group for an agent. An action group represents the actions that an agent can carry out for the customer by defining the APIs that an agent can call and the logic for calling them.

\n

To allow your agent to request the user for additional information when trying to complete a task, \n add an action group with the parentActionGroupSignature field set to AMAZON.UserInput.

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, \n add an action group with the parentActionGroupSignature field set to AMAZON.CodeInterpreter.

\n

You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group. During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

", "smithy.api#http": { "code": 200, "method": "PUT", @@ -2189,7 +2437,7 @@ "parentActionGroupSignature": { "target": "com.amazonaws.bedrockagent#ActionGroupSignature", "traits": { - "smithy.api#documentation": "

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" + "smithy.api#documentation": "

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" } }, "actionGroupExecutor": { @@ -2413,6 +2661,12 @@ "traits": { "smithy.api#documentation": "

The unique Guardrail configuration assigned to the agent when it is created.

" } + }, + "memoryConfiguration": { + "target": "com.amazonaws.bedrockagent#MemoryConfiguration", + "traits": { + "smithy.api#documentation": "

Contains the details of the memory configured for the agent.

" + } } }, "traits": { @@ -2466,7 +2720,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets up a data source to be added to a knowledge base.

\n \n

You can't change the chunkingConfiguration after you create the data source.

\n
", + "smithy.api#documentation": "

Creates a data source connector for a knowledge base.

\n \n

You can't change the chunkingConfiguration after you create the data source connector.

\n
", "smithy.api#http": { "code": 200, "method": "PUT", @@ -2512,14 +2766,14 @@ "dataSourceConfiguration": { "target": "com.amazonaws.bedrockagent#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

Contains metadata about where the data source is stored.

", + "smithy.api#documentation": "

The connection configuration for the data source.

", "smithy.api#required": {} } }, "dataDeletionPolicy": { "target": "com.amazonaws.bedrockagent#DataDeletionPolicy", "traits": { - "smithy.api#documentation": "

The data deletion policy assigned to the data source.

" + "smithy.api#documentation": "

The data deletion policy for the data source.

\n

You can set the data deletion policy to:

  • DELETE: Deletes all underlying data belonging to the data source from the vector store upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted, only the underlying data. This flag is ignored if an Amazon Web Services account is deleted.
  • RETAIN: Retains all underlying data in your vector store upon deletion of a knowledge base or data source resource.
" } }, "serverSideEncryptionConfiguration": { @@ -2554,13 +2808,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#CreateKnowledgeBase": { + "com.amazonaws.bedrockagent#CreateFlow": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#CreateKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#CreateFlowRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#CreateKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#CreateFlowResponse" }, "errors": [ { @@ -2583,11 +2837,11 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion.

\n \n

If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base.

\n
  • Provide the name and an optional description.
  • Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field.
  • Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object.
  • Provide the configuration for your vector store in the storageConfiguration object.
", + "smithy.api#documentation": "

Creates a prompt flow that you can use to send an input through various steps to yield an output. Configure nodes, each of which corresponds to a step of the flow, and create connections between the nodes to create paths to different outputs. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide.
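A minimal sketch of calling the operation once the Swift bindings are regenerated (assumes Soto 7's default AWSClient initializer and the generated memberwise initializers; the role ARN and names are placeholders, and the flow definition is omitted because its node and connection shapes are configured separately):

    import SotoCore
    import SotoBedrockAgent

    let awsClient = AWSClient()
    let bedrockAgent = BedrockAgent(client: awsClient, region: .useast1)

    let created = try await bedrockAgent.createFlow(
        .init(
            description: "Routes support questions through a knowledge base", // placeholder
            executionRoleArn: "arn:aws:iam::111122223333:role/FlowsServiceRole", // placeholder
            name: "support-triage-flow"
        )
    )
    print(created.id, created.status)

    try await awsClient.shutdown()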

", "smithy.api#http": { - "code": 202, - "method": "PUT", - "uri": "/knowledgebases/" + "code": 201, + "method": "POST", + "uri": "/flows/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -2595,54 +2849,93 @@ ] } }, - "com.amazonaws.bedrockagent#CreateKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#CreateFlowAlias": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#CreateFlowAliasRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#CreateFlowAliasResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an alias of a flow for deployment. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/flows/{flowIdentifier}/aliases" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#CreateFlowAliasRequest": { "type": "structure", "members": { - "clientToken": { - "target": "com.amazonaws.bedrockagent#ClientToken", - "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", - "smithy.api#idempotencyToken": {} - } - }, "name": { "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

A name for the knowledge base.

", + "smithy.api#documentation": "

A name for the alias.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

A description of the knowledge base.

" + "smithy.api#documentation": "

A description for the alias.

" } }, - "roleArn": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", + "smithy.api#documentation": "

Contains information about the version to which to map the alias.

", "smithy.api#required": {} } }, - "knowledgeBaseConfiguration": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

Contains details about the embeddings model used for the knowledge base.

", + "smithy.api#documentation": "

The unique identifier of the flow for which to create an alias.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "storageConfiguration": { - "target": "com.amazonaws.bedrockagent#StorageConfiguration", + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", "traits": { - "smithy.api#documentation": "

Contains details about the configuration of the vector database used for the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} } }, "tags": { "target": "com.amazonaws.bedrockagent#TagsMap", "traits": { - "smithy.api#documentation": "

Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object.

" + "aws.cloudformation#cfnMutability": "full", + "smithy.api#documentation": "

Any tags that you want to attach to the alias of the flow. For more information, see Tagging resources in Amazon Bedrock.

" } } }, @@ -2650,296 +2943,212 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#CreateKnowledgeBaseResponse": { + "com.amazonaws.bedrockagent#CreateFlowAliasResponse": { "type": "structure", "members": { - "knowledgeBase": { - "target": "com.amazonaws.bedrockagent#KnowledgeBase", + "name": { + "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

Contains details about the knowledge base.

", + "smithy.api#documentation": "

The name of the alias.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#CreationMode": { - "type": "enum", - "members": { - "DEFAULT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DEFAULT" - } }, - "OVERRIDDEN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OVERRIDDEN" - } - } - } - }, - "com.amazonaws.bedrockagent#CustomControlMethod": { - "type": "enum", - "members": { - "RETURN_CONTROL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RETURN_CONTROL" - } - } - } - }, - "com.amazonaws.bedrockagent#DataDeletionPolicy": { - "type": "enum", - "members": { - "RETAIN": { - "target": "smithy.api#Unit", + "description": { + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#enumValue": "RETAIN" + "smithy.api#documentation": "

The description of the alias.

" } }, - "DELETE": { - "target": "smithy.api#Unit", + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", "traits": { - "smithy.api#enumValue": "DELETE" + "smithy.api#documentation": "

Contains information about the version that the alias is mapped to.

", + "smithy.api#required": {} } - } - } - }, - "com.amazonaws.bedrockagent#DataSource": { - "type": "structure", - "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + }, + "flowId": { + "target": "com.amazonaws.bedrockagent#FlowId", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source belongs.

", + "smithy.api#documentation": "

The unique identifier of the flow that the alias belongs to.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "id": { + "target": "com.amazonaws.bedrockagent#FlowAliasId", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#documentation": "

The unique identifier of the alias.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.bedrockagent#Name", + "arn": { + "target": "com.amazonaws.bedrockagent#FlowAliasArn", "traits": { - "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the alias.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The status of the data source. The following statuses are possible:

  • Available – The data source has been created and is ready for ingestion into the knowledge base.
  • Deleting – The data source is being deleted.
", + "smithy.api#documentation": "

The time at which the alias was created.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The description of the data source.

" + "smithy.api#documentation": "

The time at which the alias of the flow was last updated.

", + "smithy.api#required": {} } - }, - "dataSourceConfiguration": { - "target": "com.amazonaws.bedrockagent#DataSourceConfiguration", + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#CreateFlowRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", "traits": { - "smithy.api#documentation": "

Contains details about how the data source is stored.

", + "smithy.api#documentation": "

A name for the flow.

", "smithy.api#required": {} } }, - "serverSideEncryptionConfiguration": { - "target": "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration", + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", "traits": { - "smithy.api#documentation": "

Contains details about the configuration of the server-side encryption.

" + "smithy.api#documentation": "

A description for the flow.

" } }, - "vectorIngestionConfiguration": { - "target": "com.amazonaws.bedrockagent#VectorIngestionConfiguration", + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", "traits": { - "smithy.api#documentation": "

Contains details about how to ingest the documents in the data source.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} } }, - "dataDeletionPolicy": { - "target": "com.amazonaws.bedrockagent#DataDeletionPolicy", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The data deletion policy for a data source.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the flow.

" } }, - "createdAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", "traits": { - "smithy.api#documentation": "

The time at which the data source was created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A definition of the nodes and connections between nodes in the flow.

" } }, - "updatedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", "traits": { - "smithy.api#documentation": "

The time at which the data source was last updated.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} } }, - "failureReasons": { - "target": "com.amazonaws.bedrockagent#FailureReasons", + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", "traits": { - "smithy.api#documentation": "

The detailed reasons on the failure to delete a data source.

" + "aws.cloudformation#cfnMutability": "full", + "smithy.api#documentation": "

Any tags that you want to attach to the flow. For more information, see Tagging resources in Amazon Bedrock.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about a data source.

" + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#DataSourceConfiguration": { + "com.amazonaws.bedrockagent#CreateFlowResponse": { "type": "structure", "members": { - "type": { - "target": "com.amazonaws.bedrockagent#DataSourceType", + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", "traits": { - "smithy.api#documentation": "

The type of storage for the data source.

", + "smithy.api#documentation": "

The name of the flow.

", "smithy.api#required": {} } }, - "s3Configuration": { - "target": "com.amazonaws.bedrockagent#S3DataSourceConfiguration", + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", "traits": { - "smithy.api#documentation": "

Contains details about the configuration of the S3 object containing the data source.

" + "smithy.api#documentation": "

The description of the flow.

" } - } - }, - "traits": { - "smithy.api#documentation": "

Contains details about how a data source is stored.

" - } - }, - "com.amazonaws.bedrockagent#DataSourceResource": { - "type": "resource", - "operations": [ - { - "target": "com.amazonaws.bedrockagent#CreateDataSource" - }, - { - "target": "com.amazonaws.bedrockagent#DeleteDataSource" - }, - { - "target": "com.amazonaws.bedrockagent#GetDataSource" }, - { - "target": "com.amazonaws.bedrockagent#ListDataSources" - }, - { - "target": "com.amazonaws.bedrockagent#UpdateDataSource" - } - ] - }, - "com.amazonaws.bedrockagent#DataSourceStatus": { - "type": "enum", - "members": { - "AVAILABLE": { - "target": "smithy.api#Unit", + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", "traits": { - "smithy.api#enumValue": "AVAILABLE" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} } }, - "DELETING": { - "target": "smithy.api#Unit", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#enumValue": "DELETING" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that you encrypted the flow with.

" } }, - "DELETE_UNSUCCESSFUL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_UNSUCCESSFUL" - } - } - } - }, - "com.amazonaws.bedrockagent#DataSourceSummaries": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#DataSourceSummary" - } - }, - "com.amazonaws.bedrockagent#DataSourceSummary": { - "type": "structure", - "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source belongs.

", + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.bedrockagent#Name", + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", "traits": { - "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#documentation": "

The status of the flow. When you submit this request, the status will be NotPrepared. If creation fails, the status becomes Failed.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The status of the data source.

", + "smithy.api#documentation": "

The time at which the flow was created.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The description of the data source.

" + "smithy.api#documentation": "

The time at which the flow was last updated.

", + "smithy.api#required": {} } }, - "updatedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "version": { + "target": "com.amazonaws.bedrockagent#DraftVersion", "traits": { - "smithy.api#documentation": "

The time at which the data source was last updated.

", + "smithy.api#documentation": "

The version of the flow. When you create a flow, the version created is the DRAFT version.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Contains details about a data source.

" - } - }, - "com.amazonaws.bedrockagent#DataSourceType": { - "type": "enum", - "members": { - "S3": { - "target": "smithy.api#Unit", + }, + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", "traits": { - "smithy.api#enumValue": "S3" + "smithy.api#documentation": "

A definition of the nodes and connections between nodes in the flow.

" } } - } - }, - "com.amazonaws.bedrockagent#DateTimestamp": { - "type": "timestamp", + }, "traits": { - "smithy.api#timestampFormat": "date-time" + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#DeleteAgent": { + "com.amazonaws.bedrockagent#CreateFlowVersion": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#DeleteAgentRequest" + "target": "com.amazonaws.bedrockagent#CreateFlowVersionRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#DeleteAgentResponse" + "target": "com.amazonaws.bedrockagent#CreateFlowVersionResponse" }, "errors": [ { @@ -2954,6 +3163,9 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -2962,11 +3174,11 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an agent.

", + "smithy.api#documentation": "

Creates a version of the flow that you can deploy. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.
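A sketch of the corresponding call, assuming the generated Soto request mirrors the members that follow (flowIdentifier required, description and clientToken optional) and the client type is BedrockAgent from SotoBedrockAgent (inferred, not shown in this diff):

    import SotoBedrockAgent

    // Sketch only: snapshot the DRAFT flow into a numbered, deployable version.
    func publishFlowVersion(
        _ client: BedrockAgent,
        flowIdentifier: String
    ) async throws -> String {
        let request = BedrockAgent.CreateFlowVersionRequest(
            description: "First deployable revision", // optional
            flowIdentifier: flowIdentifier             // identifier of the flow to version
        )
        let response = try await client.createFlowVersion(request)
        return response.version                        // numbered incrementally, starting from 1
    }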

", "smithy.api#http": { - "code": 202, - "method": "DELETE", - "uri": "/agents/{agentId}/" + "code": 201, + "method": "POST", + "uri": "/flows/{flowIdentifier}/versions" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -2974,111 +3186,130 @@ ] } }, - "com.amazonaws.bedrockagent#DeleteAgentActionGroup": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#DeleteAgentActionGroupRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#DeleteAgentActionGroupResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#ConflictException" + "com.amazonaws.bedrockagent#CreateFlowVersionRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow that you want to create a version of.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", + "traits": { + "smithy.api#documentation": "

A description of the version of the flow.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } } - ], + }, "traits": { - "smithy.api#documentation": "

Deletes an action group in an agent.

", - "smithy.api#http": { - "code": 204, - "method": "DELETE", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#DeleteAgentActionGroupRequest": { + "com.amazonaws.bedrockagent#CreateFlowVersionResponse": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the action group belongs to.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The name of the flow version.

", "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#DraftVersion", + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", "traits": { - "smithy.api#documentation": "

The version of the agent that the action group belongs to.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The description of the flow version.

" + } + }, + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", "smithy.api#required": {} } }, - "actionGroupId": { - "target": "com.amazonaws.bedrockagent#Id", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the action group to delete.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The KMS key that the flow is encrypted with.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } }, - "skipResourceInUseCheck": { - "target": "smithy.api#Boolean", + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "smithy.api#httpQuery": "skipResourceInUseCheck" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was created.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version of the flow that was created. Versions are numbered incrementally, starting from 1.

", + "smithy.api#required": {} + } + }, + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", + "traits": { + "smithy.api#documentation": "

A definition of the nodes and connections in the flow.

" } } }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#DeleteAgentActionGroupResponse": { - "type": "structure", - "members": {}, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#DeleteAgentAlias": { + "com.amazonaws.bedrockagent#CreateKnowledgeBase": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#DeleteAgentAliasRequest" + "target": "com.amazonaws.bedrockagent#CreateKnowledgeBaseRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#DeleteAgentAliasResponse" + "target": "com.amazonaws.bedrockagent#CreateKnowledgeBaseResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" @@ -3088,11 +3319,11 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an alias of an agent.

", + "smithy.api#documentation": "

Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion.

If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base.

• Provide the name and an optional description.

• Provide the Amazon Resource Name (ARN) of the IAM role with permissions to create a knowledge base, in the roleArn field.

• Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object.

• Provide the configuration for your vector store in the storageConfiguration object.
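A minimal Soto usage sketch for these inputs, assuming the generated module is SotoBedrockAgent with a BedrockAgent client whose types mirror the CreateKnowledgeBaseRequest members defined below (an inference from this model, not confirmed by the diff), and that the embedding and vector store configurations are built elsewhere:

    import SotoBedrockAgent

    // Sketch only: create a knowledge base from pre-built configuration values.
    // `kbConfiguration` (embedding model) and `storageConfiguration` (vector store)
    // are assumed to be constructed with the corresponding generated types.
    func createKnowledgeBase(
        _ client: BedrockAgent,
        roleArn: String,
        kbConfiguration: BedrockAgent.KnowledgeBaseConfiguration,
        storageConfiguration: BedrockAgent.StorageConfiguration
    ) async throws -> BedrockAgent.KnowledgeBase {
        let request = BedrockAgent.CreateKnowledgeBaseRequest(
            description: "Product documentation",        // optional
            knowledgeBaseConfiguration: kbConfiguration, // embeddings model details
            name: "product-docs",                        // required
            roleArn: roleArn,                            // role that Amazon Bedrock assumes
            storageConfiguration: storageConfiguration   // vector database connection
        )
        return try await client.createKnowledgeBase(request).knowledgeBase
    }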
", "smithy.api#http": { "code": 202, - "method": "DELETE", - "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" + "method": "PUT", + "uri": "/knowledgebases/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -3100,76 +3331,54 @@ ] } }, - "com.amazonaws.bedrockagent#DeleteAgentAliasRequest": { + "com.amazonaws.bedrockagent#CreateKnowledgeBaseRequest": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the alias belongs to.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} } }, - "agentAliasId": { - "target": "com.amazonaws.bedrockagent#AgentAliasId", + "name": { + "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

The unique identifier of the alias to delete.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

A name for the knowledge base.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#DeleteAgentAliasResponse": { - "type": "structure", - "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the alias belongs to.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A description of the knowledge base.

" } }, - "agentAliasId": { - "target": "com.amazonaws.bedrockagent#AgentAliasId", + "roleArn": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the alias that was deleted.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", "smithy.api#required": {} } }, - "agentAliasStatus": { - "target": "com.amazonaws.bedrockagent#AgentAliasStatus", + "knowledgeBaseConfiguration": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", "traits": { - "smithy.api#documentation": "

The status of the alias.

", + "smithy.api#documentation": "

Contains details about the embeddings model used for the knowledge base.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#DeleteAgentRequest": { - "type": "structure", - "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + }, + "storageConfiguration": { + "target": "com.amazonaws.bedrockagent#StorageConfiguration", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent to delete.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

Contains details about the configuration of the vector database used for the knowledge base.

", "smithy.api#required": {} } }, - "skipResourceInUseCheck": { - "target": "smithy.api#Boolean", + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "smithy.api#httpQuery": "skipResourceInUseCheck" + "smithy.api#documentation": "

Specify the key-value pairs for the tags that you want to attach to your knowledge base in this object.

" } } }, @@ -3177,20 +3386,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#DeleteAgentResponse": { + "com.amazonaws.bedrockagent#CreateKnowledgeBaseResponse": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that was deleted.

", - "smithy.api#required": {} - } - }, - "agentStatus": { - "target": "com.amazonaws.bedrockagent#AgentStatus", + "knowledgeBase": { + "target": "com.amazonaws.bedrockagent#KnowledgeBase", "traits": { - "smithy.api#documentation": "

The status of the agent.

", + "smithy.api#documentation": "

Contains details about the knowledge base.

", "smithy.api#required": {} } } @@ -3199,13 +3401,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#DeleteAgentVersion": { + "com.amazonaws.bedrockagent#CreatePrompt": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#DeleteAgentVersionRequest" + "target": "com.amazonaws.bedrockagent#CreatePromptRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#DeleteAgentVersionResponse" + "target": "com.amazonaws.bedrockagent#CreatePromptResponse" }, "errors": [ { @@ -3218,7 +3420,7 @@ "target": "com.amazonaws.bedrockagent#InternalServerException" }, { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" @@ -3228,11 +3430,11 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a version of an agent.

", + "smithy.api#documentation": "

Creates a prompt in your prompt library that you can add to a flow. For more information, see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt flows in Amazon Bedrock in the Amazon Bedrock User Guide.
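Only name is required by this shape; variants, defaultVariant, description, customerEncryptionKeyArn, clientToken and tags are optional. A hedged sketch, assuming the generated Soto types follow the member names below and that PromptVariant exposes the name field referenced by defaultVariant:

    import SotoBedrockAgent

    // Sketch only: create a prompt with one caller-supplied variant.
    // `variant` is an assumed, already-built PromptVariant value.
    func createPrompt(
        _ client: BedrockAgent,
        variant: BedrockAgent.PromptVariant
    ) async throws -> String {
        let request = BedrockAgent.CreatePromptRequest(
            defaultVariant: variant.name,              // must match a variant's name
            description: "Summarises support tickets", // optional
            name: "ticket-summary",                    // required
            variants: [variant]
        )
        return try await client.createPrompt(request).id
    }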

", "smithy.api#http": { - "code": 202, - "method": "DELETE", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/" + "code": 201, + "method": "POST", + "uri": "/prompts/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -3240,31 +3442,52 @@ ] } }, - "com.amazonaws.bedrockagent#DeleteAgentVersionRequest": { + "com.amazonaws.bedrockagent#CreatePromptRequest": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the version belongs to.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

A name for the prompt.

", "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#NumericalVersion", + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", "traits": { - "smithy.api#documentation": "

The version of the agent to delete.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

A description for the prompt.

" } }, - "skipResourceInUseCheck": { - "target": "smithy.api#Boolean", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", - "smithy.api#httpQuery": "skipResourceInUseCheck" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" + } + }, + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + } + }, + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } + }, + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", + "traits": { + "aws.cloudformation#cfnMutability": "full", + "smithy.api#documentation": "

Any tags that you want to attach to the prompt. For more information, see Tagging resources in Amazon Bedrock.

" } } }, @@ -3272,121 +3495,72 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#DeleteAgentVersionResponse": { + "com.amazonaws.bedrockagent#CreatePromptResponse": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the version belongs to.

", + "smithy.api#documentation": "

The name of the prompt.

", "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#NumericalVersion", + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", "traits": { - "smithy.api#documentation": "

The version that was deleted.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the prompt.

" } }, - "agentStatus": { - "target": "com.amazonaws.bedrockagent#AgentStatus", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The status of the agent version.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that you encrypted the prompt with.

" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#DeleteDataSource": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#DeleteDataSourceRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#DeleteDataSourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#ConflictException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for your prompt.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

Deletes a data source from a knowledge base.

", - "smithy.api#http": { - "code": 202, - "method": "DELETE", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "console" - ] - } - }, - "com.amazonaws.bedrockagent#DeleteDataSourceRequest": { - "type": "structure", - "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base from which to delete the data source.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The unique identifier of the prompt.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "arn": { + "target": "com.amazonaws.bedrockagent#PromptArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source to delete.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the prompt.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#DeleteDataSourceResponse": { - "type": "structure", - "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + }, + "version": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source that was deleted belonged.

", + "smithy.api#documentation": "

The version of the prompt. When you create a prompt, the version created is the DRAFT version.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source that was deleted.

", + "smithy.api#documentation": "

The time at which the prompt was created.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The status of the data source.

", + "smithy.api#documentation": "

The time at which the prompt was last updated.

", "smithy.api#required": {} } } @@ -3395,13 +3569,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#DeleteKnowledgeBase": { + "com.amazonaws.bedrockagent#CreatePromptVersion": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#CreatePromptVersionRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#CreatePromptVersionResponse" }, "errors": [ { @@ -3416,6 +3590,9 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -3424,11 +3601,11 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the knowledge base from any agents that it is associated with by making a DisassociateAgentKnowledgeBase request.

", + "smithy.api#documentation": "

Creates a static snapshot of your prompt that can be deployed to production. For more information, see Deploy prompts using Prompt management by creating versions in the Amazon Bedrock User Guide.

", "smithy.api#http": { - "code": 202, - "method": "DELETE", - "uri": "/knowledgebases/{knowledgeBaseId}" + "code": 201, + "method": "POST", + "uri": "/prompts/{promptIdentifier}/versions" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -3436,414 +3613,469 @@ ] } }, - "com.amazonaws.bedrockagent#DeleteKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#CreatePromptVersionRequest": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to delete.

", + "smithy.api#documentation": "

The unique identifier of the prompt that you want to create a version of.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", + "traits": { + "smithy.api#documentation": "

A description for the version of the prompt.

" + } + }, + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", + "traits": { + "aws.cloudformation#cfnMutability": "full", + "smithy.api#documentation": "

Any tags that you want to attach to the version of the prompt. For more information, see Tagging resources in Amazon Bedrock.

" + } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#DeleteKnowledgeBaseResponse": { + "com.amazonaws.bedrockagent#CreatePromptVersionResponse": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base that was deleted.

", + "smithy.api#documentation": "

The name of the prompt version.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", "traits": { - "smithy.api#documentation": "

The status of the knowledge base and whether it has been successfully deleted.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A description for the prompt version.

" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#Description": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 200 - } - } - }, - "com.amazonaws.bedrockagent#Dimensions": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0, - "max": 4096 - } - } - }, - "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBase": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - { - "target": "com.amazonaws.bedrockagent#ConflictException" + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the version of the prompt.

" + } }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

Disassociates a knowledge base from an agent.

", - "smithy.api#http": { - "code": 204, - "method": "DELETE", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" + "arn": { + "target": "com.amazonaws.bedrockagent#PromptArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the version of the prompt.

", + "smithy.api#required": {} + } }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "console" - ] - } - }, - "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseRequest": { - "type": "structure", - "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "version": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent from which to disassociate the knowledge base.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The version of the prompt that was created. Versions are numbered incrementally, starting from 1.

", "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#DraftVersion", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The version of the agent from which to disassociate the knowledge base.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The time at which the prompt was created.

", "smithy.api#required": {} } }, - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to disassociate.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The time at which the prompt was last updated.

", "smithy.api#required": {} } } }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseResponse": { - "type": "structure", - "members": {}, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#DraftVersion": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 5, - "max": 5 + "com.amazonaws.bedrockagent#CreationMode": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } }, - "smithy.api#pattern": "^DRAFT$" + "OVERRIDDEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OVERRIDDEN" + } + } } }, - "com.amazonaws.bedrockagent#EmbeddingModelConfiguration": { - "type": "structure", + "com.amazonaws.bedrockagent#CustomControlMethod": { + "type": "enum", "members": { - "bedrockEmbeddingModelConfiguration": { - "target": "com.amazonaws.bedrockagent#BedrockEmbeddingModelConfiguration", + "RETURN_CONTROL": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The vector configuration details on the Bedrock embeddings model.

" + "smithy.api#enumValue": "RETURN_CONTROL" } } - }, - "traits": { - "smithy.api#documentation": "

The configuration details for the embeddings model.

" } }, - "com.amazonaws.bedrockagent#FailureReason": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 + "com.amazonaws.bedrockagent#CustomTransformationConfiguration": { + "type": "structure", + "members": { + "intermediateStorage": { + "target": "com.amazonaws.bedrockagent#IntermediateStorage", + "traits": { + "smithy.api#documentation": "

An S3 bucket path for input and output objects.

", + "smithy.api#required": {} + } + }, + "transformations": { + "target": "com.amazonaws.bedrockagent#Transformations", + "traits": { + "smithy.api#documentation": "

A Lambda function that processes documents.

", + "smithy.api#required": {} + } } - } - }, - "com.amazonaws.bedrockagent#FailureReasons": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#FailureReason" }, "traits": { - "smithy.api#length": { - "max": 2048 - } + "smithy.api#documentation": "

Settings for customizing steps in the data source content ingestion pipeline.

You can configure the data source to process documents with a Lambda function after they are parsed and converted into chunks. When you add a post-chunking transformation, the service stores chunked documents in an S3 bucket and invokes a Lambda function to process them.

To process chunked documents with a Lambda function, define an S3 bucket path for input and output objects, and a transformation that specifies the Lambda function to invoke. You can use the Lambda function to customize how chunks are split, and the metadata for each chunk.
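As a sketch under the same naming assumptions (SotoBedrockAgent module, member names as in this shape, and a Transformation element type for the transformations list, none of which is confirmed by the diff):

    import SotoBedrockAgent

    // Sketch only: attach a post-chunking Lambda transformation to the ingestion pipeline.
    // `storage` (S3 path for intermediate objects) and `lambdaTransformation` are assumed,
    // already-built values of the corresponding generated types.
    func customTransformation(
        storage: BedrockAgent.IntermediateStorage,
        lambdaTransformation: BedrockAgent.Transformation
    ) -> BedrockAgent.CustomTransformationConfiguration {
        return BedrockAgent.CustomTransformationConfiguration(
            intermediateStorage: storage,            // S3 bucket path for input and output objects
            transformations: [lambdaTransformation]  // Lambda function(s) invoked per chunked document
        )
    }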

" } }, - "com.amazonaws.bedrockagent#FieldName": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 + "com.amazonaws.bedrockagent#DataDeletionPolicy": { + "type": "enum", + "members": { + "RETAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RETAIN" + } }, - "smithy.api#pattern": "^.*$" + "DELETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE" + } + } } }, - "com.amazonaws.bedrockagent#FixedSizeChunkingConfiguration": { + "com.amazonaws.bedrockagent#DataSource": { "type": "structure", "members": { - "maxTokens": { - "target": "smithy.api#Integer", + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The maximum number of tokens to include in a chunk.

", - "smithy.api#range": { - "min": 1 - }, + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source belongs.

", "smithy.api#required": {} } }, - "overlapPercentage": { - "target": "smithy.api#Integer", + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The percentage of overlap between adjacent chunks of a data source.

", - "smithy.api#range": { - "min": 1, - "max": 99 - }, + "smithy.api#documentation": "

The unique identifier of the data source.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.
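For example, under the assumption that the generated Swift struct keeps these member names, 300-token chunks with a 20 percent overlap would be configured as:

    import SotoBedrockAgent

    // Sketch only: fixed-size chunking, 300 tokens per chunk, 20% overlap.
    // Ranges per this model: maxTokens >= 1, overlapPercentage between 1 and 99.
    let chunking = BedrockAgent.FixedSizeChunkingConfiguration(
        maxTokens: 300,
        overlapPercentage: 20
    )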

" - } - }, - "com.amazonaws.bedrockagent#Function": { - "type": "structure", - "members": { + }, "name": { "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

A name for the function.

", + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source. The following statuses are possible:

• Available – The data source has been created and is ready for ingestion into the knowledge base.

• Deleting – The data source is being deleted.
", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.bedrockagent#FunctionDescription", + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

A description of the function and its purpose.

" + "smithy.api#documentation": "

The description of the data source.

" } }, - "parameters": { - "target": "com.amazonaws.bedrockagent#ParameterMap", + "dataSourceConfiguration": { + "target": "com.amazonaws.bedrockagent#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

The parameters that the agent elicits from the user to fulfill the function.

" + "smithy.api#documentation": "

The connection configuration for the data source.

", + "smithy.api#required": {} + } + }, + "serverSideEncryptionConfiguration": { + "target": "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the configuration of the server-side encryption.

" + } + }, + "vectorIngestionConfiguration": { + "target": "com.amazonaws.bedrockagent#VectorIngestionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about how to ingest the documents in the data source.

" + } + }, + "dataDeletionPolicy": { + "target": "com.amazonaws.bedrockagent#DataDeletionPolicy", + "traits": { + "smithy.api#documentation": "

The data deletion policy for the data source.

" + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the data source was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the data source was last updated.

", + "smithy.api#required": {} + } + }, + "failureReasons": { + "target": "com.amazonaws.bedrockagent#FailureReasons", + "traits": { + "smithy.api#documentation": "

The detailed reasons for the failure to delete the data source.

" } } }, "traits": { - "smithy.api#documentation": "

Defines parameters that the agent needs to invoke from the user to complete the function. Corresponds to an action in an action group.

\n

This data type is used in the following API operations:

\n " - } - }, - "com.amazonaws.bedrockagent#FunctionDescription": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1200 - } + "smithy.api#documentation": "

Contains details about a data source.

" } }, - "com.amazonaws.bedrockagent#FunctionSchema": { - "type": "union", + "com.amazonaws.bedrockagent#DataSourceConfiguration": { + "type": "structure", "members": { - "functions": { - "target": "com.amazonaws.bedrockagent#Functions", + "type": { + "target": "com.amazonaws.bedrockagent#DataSourceType", "traits": { - "smithy.api#documentation": "

A list of functions that each define an action in the action group.

" + "smithy.api#documentation": "

The type of data source.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Defines functions that each define parameters that the agent needs to invoke from the user. Each function represents an action in an action group.

\n

This data type is used in the following API operations:

\n " - } - }, - "com.amazonaws.bedrockagent#Functions": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#Function" - } - }, - "com.amazonaws.bedrockagent#GetAgent": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#GetAgentRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#GetAgentResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "s3Configuration": { + "target": "com.amazonaws.bedrockagent#S3DataSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Amazon S3 as your data source.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "webConfiguration": { + "target": "com.amazonaws.bedrockagent#WebDataSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of web URLs to crawl for your data source. You should be authorized to crawl the URLs.

Crawling web URLs as your data source is in preview release and is subject to change.

\n
" + } }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "confluenceConfiguration": { + "target": "com.amazonaws.bedrockagent#ConfluenceDataSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Confluence as your data source.

Confluence data source connector is in preview release and is subject to change.

\n
" + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "salesforceConfiguration": { + "target": "com.amazonaws.bedrockagent#SalesforceDataSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Salesforce as your data source.

Salesforce data source connector is in preview release and is subject to change.

\n
" + } + }, + "sharePointConfiguration": { + "target": "com.amazonaws.bedrockagent#SharePointDataSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information to connect to SharePoint as your data source.

SharePoint data source connector is in preview release and is subject to change.

\n
" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Gets information about an agent.

", - "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/agents/{agentId}/" - }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#documentation": "

The connection configuration for the data source.
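The type member selects which of the connector blocks applies. A sketch for the S3 case, assuming the generated initializer mirrors these members and that the S3 connection details are built separately:

    import SotoBedrockAgent

    // Sketch only: wire an S3 bucket up as the data source connector.
    // `s3Config` is an assumed, already-built S3DataSourceConfiguration value.
    func s3DataSourceConfiguration(
        _ s3Config: BedrockAgent.S3DataSourceConfiguration
    ) -> BedrockAgent.DataSourceConfiguration {
        return BedrockAgent.DataSourceConfiguration(
            s3Configuration: s3Config, // connection details for the bucket
            type: .s3                  // must match the connector that is set
        )
    }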

" } }, - "com.amazonaws.bedrockagent#GetAgentActionGroup": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#GetAgentActionGroupRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#GetAgentActionGroupResponse" - }, - "errors": [ + "com.amazonaws.bedrockagent#DataSourceResource": { + "type": "resource", + "operations": [ { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" + "target": "com.amazonaws.bedrockagent#CreateDataSource" }, { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "target": "com.amazonaws.bedrockagent#DeleteDataSource" }, { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "target": "com.amazonaws.bedrockagent#GetDataSource" }, { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "target": "com.amazonaws.bedrockagent#ListDataSources" }, { - "target": "com.amazonaws.bedrockagent#ValidationException" + "target": "com.amazonaws.bedrockagent#UpdateDataSource" } - ], - "traits": { - "smithy.api#documentation": "

Gets information about an action group for an agent.

", - "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" - }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] - } + ] }, - "com.amazonaws.bedrockagent#GetAgentActionGroupRequest": { - "type": "structure", + "com.amazonaws.bedrockagent#DataSourceStatus": { + "type": "enum", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "AVAILABLE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent that the action group belongs to.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#enumValue": "AVAILABLE" } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#Version", + "DELETING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The version of the agent that the action group belongs to.

", - "smithy.api#httpLabel": {}, + "smithy.api#enumValue": "DELETING" + } + }, + "DELETE_UNSUCCESSFUL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_UNSUCCESSFUL" + } + } + } + }, + "com.amazonaws.bedrockagent#DataSourceSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#DataSourceSummary" + } + }, + "com.amazonaws.bedrockagent#DataSourceSummary": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source belongs.

", "smithy.api#required": {} } }, - "actionGroupId": { + "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the action group for which to get information.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the data source was last updated.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Contains details about a data source.

" } }, - "com.amazonaws.bedrockagent#GetAgentActionGroupResponse": { - "type": "structure", + "com.amazonaws.bedrockagent#DataSourceType": { + "type": "enum", "members": { - "agentActionGroup": { - "target": "com.amazonaws.bedrockagent#AgentActionGroup", + "S3": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains details about the action group.

", - "smithy.api#required": {} + "smithy.api#enumValue": "S3" + } + }, + "WEB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WEB" + } + }, + "CONFLUENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONFLUENCE" + } + }, + "SALESFORCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SALESFORCE" + } + }, + "SHAREPOINT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHAREPOINT" } } - }, + } + }, + "com.amazonaws.bedrockagent#DateTimestamp": { + "type": "timestamp", "traits": { - "smithy.api#output": {} + "smithy.api#timestampFormat": "date-time" } }, - "com.amazonaws.bedrockagent#GetAgentAlias": { + "com.amazonaws.bedrockagent#DeleteAgent": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetAgentAliasRequest" + "target": "com.amazonaws.bedrockagent#DeleteAgentRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetAgentAliasResponse" + "target": "com.amazonaws.bedrockagent#DeleteAgentResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, @@ -3858,69 +4090,33 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about an alias of an agent.

", + "smithy.api#documentation": "

Deletes an agent.
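A sketch of the call, assuming the generated request mirrors the DeleteAgentRequest members shown further down (agentId plus the optional skipResourceInUseCheck flag):

    import SotoBedrockAgent

    // Sketch only: delete an agent, forcing deletion even if it is still in use.
    func deleteAgent(_ client: BedrockAgent, agentId: String) async throws {
        let request = BedrockAgent.DeleteAgentRequest(
            agentId: agentId,
            skipResourceInUseCheck: true // default is false: deletion stops if the agent is in use
        )
        let response = try await client.deleteAgent(request)
        print("agent \(response.agentId) status: \(response.agentStatus)")
    }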

", "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" + "code": 202, + "method": "DELETE", + "uri": "/agents/{agentId}/" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetAgentAliasRequest": { - "type": "structure", - "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the agent to which the alias to get information belongs.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "agentAliasId": { - "target": "com.amazonaws.bedrockagent#AgentAliasId", - "traits": { - "smithy.api#documentation": "

The unique identifier of the alias for which to get information.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#GetAgentAliasResponse": { - "type": "structure", - "members": { - "agentAlias": { - "target": "com.amazonaws.bedrockagent#AgentAlias", - "traits": { - "smithy.api#documentation": "

Contains information about the alias.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#GetAgentKnowledgeBase": { + "com.amazonaws.bedrockagent#DeleteAgentActionGroup": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#DeleteAgentActionGroupRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#DeleteAgentActionGroupResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, @@ -3935,74 +4131,51 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a knowledge base associated with an agent.

", + "smithy.api#documentation": "

Deletes an action group in an agent.

", "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" + "code": 204, + "method": "DELETE", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#DeleteAgentActionGroupRequest": { "type": "structure", "members": { "agentId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent with which the knowledge base is associated.

", + "smithy.api#documentation": "

The unique identifier of the agent that the action group belongs to.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, "agentVersion": { - "target": "com.amazonaws.bedrockagent#Version", + "target": "com.amazonaws.bedrockagent#DraftVersion", "traits": { - "smithy.api#documentation": "

The version of the agent with which the knowledge base is associated.

", + "smithy.api#documentation": "

The version of the agent that the action group belongs to.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "knowledgeBaseId": { + "actionGroupId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base associated with the agent.

", + "smithy.api#documentation": "

The unique identifier of the action group to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseResponse": { - "type": "structure", - "members": { - "agentKnowledgeBase": { - "target": "com.amazonaws.bedrockagent#AgentKnowledgeBase", - "traits": { - "smithy.api#documentation": "

Contains details about a knowledge base attached to an agent.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#GetAgentRequest": { - "type": "structure", - "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + }, + "skipResourceInUseCheck": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#default": false, + "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "smithy.api#httpQuery": "skipResourceInUseCheck" } } }, @@ -4010,28 +4183,20 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GetAgentResponse": { + "com.amazonaws.bedrockagent#DeleteAgentActionGroupResponse": { "type": "structure", - "members": { - "agent": { - "target": "com.amazonaws.bedrockagent#Agent", - "traits": { - "smithy.api#documentation": "

Contains details about the agent.

", - "smithy.api#required": {} - } - } - }, + "members": {}, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#GetAgentVersion": { + "com.amazonaws.bedrockagent#DeleteAgentAlias": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetAgentVersionRequest" + "target": "com.amazonaws.bedrockagent#DeleteAgentAliasRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetAgentVersionResponse" + "target": "com.amazonaws.bedrockagent#DeleteAgentAliasResponse" }, "errors": [ { @@ -4051,33 +4216,33 @@ } ], "traits": { - "smithy.api#documentation": "

Gets details about a version of an agent.

", + "smithy.api#documentation": "

Deletes an alias of an agent.

", "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/" + "code": 202, + "method": "DELETE", + "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetAgentVersionRequest": { + "com.amazonaws.bedrockagent#DeleteAgentAliasRequest": { "type": "structure", "members": { "agentId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#documentation": "

The unique identifier of the agent that the alias belongs to.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#NumericalVersion", + "agentAliasId": { + "target": "com.amazonaws.bedrockagent#AgentAliasId", "traits": { - "smithy.api#documentation": "

The version of the agent.

", + "smithy.api#documentation": "

The unique identifier of the alias to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4087,13 +4252,27 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GetAgentVersionResponse": { + "com.amazonaws.bedrockagent#DeleteAgentAliasResponse": { "type": "structure", "members": { - "agentVersion": { - "target": "com.amazonaws.bedrockagent#AgentVersion", + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

Contains details about the version of the agent.

", + "smithy.api#documentation": "

The unique identifier of the agent that the alias belongs to.

", + "smithy.api#required": {} + } + }, + "agentAliasId": { + "target": "com.amazonaws.bedrockagent#AgentAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias that was deleted.

", + "smithy.api#required": {} + } + }, + "agentAliasStatus": { + "target": "com.amazonaws.bedrockagent#AgentAliasStatus", + "traits": { + "smithy.api#documentation": "

The status of the alias.

", "smithy.api#required": {} } } @@ -4102,18 +4281,67 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#GetDataSource": { + "com.amazonaws.bedrockagent#DeleteAgentRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "skipResourceInUseCheck": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.
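As a rough illustration of how this flag surfaces in the generated Swift client, the sketch below deletes an agent and only forces the deletion when the caller asks for it. The module name, client setup, and method signature follow Soto's usual generated pattern and are assumptions here, not something this diff defines.

```swift
import SotoBedrockAgent
import SotoCore

func deleteAgent(id: String, force: Bool) async throws {
    // Minimal sketch, assuming Soto's usual AWSClient + service-struct pattern.
    let client = AWSClient()
    let bedrockAgent = BedrockAgent(client: client, region: .useast1)

    // skipResourceInUseCheck defaults to false; passing true deletes the agent
    // even if it is still in use, as described above.
    let response = try await bedrockAgent.deleteAgent(
        .init(agentId: id, skipResourceInUseCheck: force)
    )
    print("Agent \(response.agentId) is now \(response.agentStatus)")

    try await client.shutdown()
}
```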

", + "smithy.api#httpQuery": "skipResourceInUseCheck" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#DeleteAgentResponse": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent that was deleted.

", + "smithy.api#required": {} + } + }, + "agentStatus": { + "target": "com.amazonaws.bedrockagent#AgentStatus", + "traits": { + "smithy.api#documentation": "

The status of the agent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#DeleteAgentVersion": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetDataSourceRequest" + "target": "com.amazonaws.bedrockagent#DeleteAgentVersionRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetDataSourceResponse" + "target": "com.amazonaws.bedrockagent#DeleteAgentVersionResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, @@ -4128,49 +4356,71 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a data source.

", + "smithy.api#documentation": "

Deletes a version of an agent.

", "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" + "code": 202, + "method": "DELETE", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetDataSourceRequest": { + "com.amazonaws.bedrockagent#DeleteAgentVersionRequest": { "type": "structure", "members": { - "knowledgeBaseId": { + "agentId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base that the data source was added to.

", + "smithy.api#documentation": "

The unique identifier of the agent that the version belongs to.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "agentVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#documentation": "

The version of the agent to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "skipResourceInUseCheck": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "smithy.api#httpQuery": "skipResourceInUseCheck" + } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GetDataSourceResponse": { + "com.amazonaws.bedrockagent#DeleteAgentVersionResponse": { "type": "structure", "members": { - "dataSource": { - "target": "com.amazonaws.bedrockagent#DataSource", + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

Contains details about the data source.

", + "smithy.api#documentation": "

The unique identifier of the agent that the version belongs to.

", + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version that was deleted.

", + "smithy.api#required": {} + } + }, + "agentStatus": { + "target": "com.amazonaws.bedrockagent#AgentStatus", + "traits": { + "smithy.api#documentation": "

The status of the agent version.

", "smithy.api#required": {} } } @@ -4179,18 +4429,21 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#GetIngestionJob": { + "com.amazonaws.bedrockagent#DeleteDataSource": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetIngestionJobRequest" + "target": "com.amazonaws.bedrockagent#DeleteDataSourceRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetIngestionJobResponse" + "target": "com.amazonaws.bedrockagent#DeleteDataSourceResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, @@ -4205,25 +4458,25 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about an ingestion job, in which a data source is added to a knowledge base.

", + "smithy.api#documentation": "

Deletes a data source from a knowledge base.

", "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/{ingestionJobId}" + "code": 202, + "method": "DELETE", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetIngestionJobRequest": { + "com.amazonaws.bedrockagent#DeleteDataSourceRequest": { "type": "structure", "members": { "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the ingestion job applies.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base from which to delete the data source.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4231,15 +4484,7 @@ "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ingestionJobId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data source to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4249,13 +4494,27 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GetIngestionJobResponse": { + "com.amazonaws.bedrockagent#DeleteDataSourceResponse": { "type": "structure", "members": { - "ingestionJob": { - "target": "com.amazonaws.bedrockagent#IngestionJob", + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

Contains details about the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source that was deleted belonged.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source that was deleted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

", "smithy.api#required": {} } } @@ -4264,18 +4523,21 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#GetKnowledgeBase": { + "com.amazonaws.bedrockagent#DeleteFlow": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#GetKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#DeleteFlowRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#GetKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#DeleteFlowResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, @@ -4290,25 +4552,74 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a knowledge base.

", + "smithy.api#documentation": "

Deletes a flow.

", "smithy.api#http": { "code": 200, - "method": "GET", - "uri": "/knowledgebases/{knowledgeBaseId}" + "method": "DELETE", + "uri": "/flows/{flowIdentifier}/" }, - "smithy.api#readonly": {}, + "smithy.api#idempotent": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#GetKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#DeleteFlowAlias": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#DeleteFlowAliasRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#DeleteFlowAliasResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an alias of a flow.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/flows/{flowIdentifier}/aliases/{aliasIdentifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#DeleteFlowAliasRequest": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which to get information.

", + "smithy.api#documentation": "

The unique identifier of the flow that the alias belongs to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aliasIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowAliasIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias to be deleted.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4318,13 +4629,20 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GetKnowledgeBaseResponse": { + "com.amazonaws.bedrockagent#DeleteFlowAliasResponse": { "type": "structure", "members": { - "knowledgeBase": { - "target": "com.amazonaws.bedrockagent#KnowledgeBase", + "flowId": { + "target": "com.amazonaws.bedrockagent#FlowId", "traits": { - "smithy.api#documentation": "

Contains details about the knowledge base.

", + "smithy.api#documentation": "

The unique identifier of the flow that the alias belongs to.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } } @@ -4333,1008 +4651,5497 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#GuardrailConfiguration": { + "com.amazonaws.bedrockagent#DeleteFlowRequest": { "type": "structure", "members": { - "guardrailIdentifier": { - "target": "com.amazonaws.bedrockagent#GuardrailIdentifier", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

The guardrails identifier assigned to the guardrails configuration.

" + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "guardrailVersion": { - "target": "com.amazonaws.bedrockagent#GuardrailVersion", + "skipResourceInUseCheck": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

The guardrails version assigned to the guardrails configuration.

" + "smithy.api#default": false, + "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "smithy.api#httpQuery": "skipResourceInUseCheck" } } }, "traits": { - "smithy.api#documentation": "

The details of the guardrails configuration.

" - } - }, - "com.amazonaws.bedrockagent#GuardrailIdentifier": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 - }, - "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#GuardrailVersion": { - "type": "string", + "com.amazonaws.bedrockagent#DeleteFlowResponse": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + } + }, "traits": { - "smithy.api#pattern": "^(([0-9]{1,8})|(DRAFT))$" + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#Id": { - "type": "string", + "com.amazonaws.bedrockagent#DeleteFlowVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#DeleteFlowVersionRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#DeleteFlowVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], "traits": { - "smithy.api#pattern": "^[0-9a-zA-Z]{10}$" + "smithy.api#documentation": "

Deletes a version of a flow.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/flows/{flowIdentifier}/versions/{flowVersion}/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] } }, - "com.amazonaws.bedrockagent#InferenceConfiguration": { + "com.amazonaws.bedrockagent#DeleteFlowVersionRequest": { "type": "structure", "members": { - "temperature": { - "target": "com.amazonaws.bedrockagent#Temperature", - "traits": { - "smithy.api#documentation": "

The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.

" - } - }, - "topP": { - "target": "com.amazonaws.bedrockagent#TopP", - "traits": { - "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" - } - }, - "topK": { - "target": "com.amazonaws.bedrockagent#TopK", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for topK is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topK to 50, the model selects the next token from among the top 50 most likely choices.

" + "smithy.api#documentation": "

The unique identifier of the flow whose version you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "maximumLength": { - "target": "com.amazonaws.bedrockagent#MaximumLength", + "flowVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", "traits": { - "smithy.api#documentation": "

The maximum number of tokens to allow in the generated response.

" + "smithy.api#documentation": "

The version of the flow that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "stopSequences": { - "target": "com.amazonaws.bedrockagent#StopSequences", + "skipResourceInUseCheck": { + "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.

" + "smithy.api#default": false, + "smithy.api#documentation": "

By default, this value is false and deletion is stopped if the resource is in use. If you set it to true, the resource will be deleted even if the resource is in use.

", + "smithy.api#httpQuery": "skipResourceInUseCheck" } } }, "traits": { - "smithy.api#documentation": "

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.
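For orientation, here is a minimal sketch of these inference parameters built through the Soto-generated Swift shape; the memberwise initializer, optional member types, and the sample values are assumptions based on Soto's usual codegen, not taken from this diff.

```swift
import SotoBedrockAgent

// Minimal sketch, assuming Soto's generated memberwise initializer for
// BedrockAgent.InferenceConfiguration (all members optional).
let inference = BedrockAgent.InferenceConfiguration(
    maximumLength: 512,          // cap on tokens in the generated response
    stopSequences: ["Human:"],   // stop generating when this sequence appears
    temperature: 0.2,            // lower values favour higher-probability tokens
    topK: 50,                    // choose the next token from the 50 most likely candidates
    topP: 0.9                    // restrict candidates to the top of the probability distribution
)
```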

" + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#IngestionJob": { + "com.amazonaws.bedrockagent#DeleteFlowVersionResponse": { "type": "structure", "members": { - "knowledgeBaseId": { + "id": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is being added.

", + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "version": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingested data source.

", + "smithy.api#documentation": "

The version of the flow being deleted.

", "smithy.api#required": {} } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#DeleteKnowledgeBase": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBaseRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - "ingestionJobId": { + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the knowledge base from any agents that it is associated with by making a DisassociateAgentKnowledgeBase request.
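To make that ordering concrete, a sketch of the sequence through the generated Soto client might look like the following; the operation and member names come from this model, while the client setup and signatures are assumed from Soto's usual generated pattern, and the identifiers are placeholders.

```swift
import SotoBedrockAgent
import SotoCore

// Minimal sketch: detach a knowledge base from an agent's DRAFT version,
// then delete the knowledge base itself.
func removeKnowledgeBase(agentId: String, knowledgeBaseId: String) async throws {
    let client = AWSClient()
    let bedrockAgent = BedrockAgent(client: client, region: .useast1)

    // 1. Disassociate the knowledge base from any agent that still uses it.
    _ = try await bedrockAgent.disassociateAgentKnowledgeBase(
        .init(agentId: agentId, agentVersion: "DRAFT", knowledgeBaseId: knowledgeBaseId)
    )

    // 2. Delete the knowledge base; the response reports its status.
    let deleted = try await bedrockAgent.deleteKnowledgeBase(
        .init(knowledgeBaseId: knowledgeBaseId)
    )
    print("Knowledge base \(deleted.knowledgeBaseId): \(deleted.status)")

    try await client.shutdown()
}
```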

", + "smithy.api#http": { + "code": 202, + "method": "DELETE", + "uri": "/knowledgebases/{knowledgeBaseId}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#DeleteKnowledgeBaseRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base to delete.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#DeleteKnowledgeBaseResponse": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The description of the ingestion job.

" + "smithy.api#documentation": "

The unique identifier of the knowledge base that was deleted.

", + "smithy.api#required": {} } }, "status": { - "target": "com.amazonaws.bedrockagent#IngestionJobStatus", + "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", "traits": { - "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#documentation": "

The status of the knowledge base and whether it has been successfully deleted.

", "smithy.api#required": {} } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#DeletePrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#DeletePromptRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#DeletePromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - "statistics": { - "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", - "traits": { - "smithy.api#documentation": "

Contains statistics about the ingestion job.

" - } + { + "target": "com.amazonaws.bedrockagent#ConflictException" }, - "failureReasons": { - "target": "com.amazonaws.bedrockagent#FailureReasons", - "traits": { - "smithy.api#documentation": "

A list of reasons that the ingestion job failed.

" - } + { + "target": "com.amazonaws.bedrockagent#InternalServerException" }, - "startedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a prompt or a prompt version from the Prompt management tool. For more information, see Delete prompts from the Prompt management tool and Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User Guide.
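As a small illustration of the promptVersion query parameter described here, the sketch below deletes a single version rather than the whole prompt; the names and signatures follow Soto's generated pattern and are assumptions, and the identifier is a placeholder.

```swift
import SotoBedrockAgent
import SotoCore

func deletePromptVersion() async throws {
    // Minimal sketch following Soto's generated client pattern.
    let client = AWSClient()
    let bedrockAgent = BedrockAgent(client: client, region: .useast1)

    // Omitting promptVersion deletes the prompt itself; passing it deletes
    // only that version, via the promptVersion query parameter.
    let response = try await bedrockAgent.deletePrompt(
        .init(promptIdentifier: "PROMPT1234", promptVersion: "1")
    )
    print("Deleted prompt \(response.id), version \(response.version ?? "none")")

    try await client.shutdown()
}
```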

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/prompts/{promptIdentifier}/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#DeletePromptRequest": { + "type": "structure", + "members": { + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job started.

", + "smithy.api#documentation": "

The unique identifier of the prompt.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "updatedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "promptVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The version of the prompt to delete.

", + "smithy.api#httpQuery": "promptVersion" } } }, "traits": { - "smithy.api#documentation": "

Contains details about an ingestion job, which converts a data source to embeddings for a vector store in a knowledge base.


This data type is used in the following API operations:

\n " + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#IngestionJobFilter": { + "com.amazonaws.bedrockagent#DeletePromptResponse": { "type": "structure", "members": { - "attribute": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilterAttribute", + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", "traits": { - "smithy.api#documentation": "

The attribute by which to filter the results.

", + "smithy.api#documentation": "

The unique identifier of the prompt that was deleted.

", "smithy.api#required": {} } }, - "operator": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilterOperator", + "version": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", "traits": { - "smithy.api#documentation": "

The operation to carry out between the attribute and the values.

", - "smithy.api#required": {} - } - }, - "values": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilterValues", - "traits": { - "smithy.api#documentation": "

A list of values for the attribute.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The version of the prompt that was deleted.

" } } }, "traits": { - "smithy.api#documentation": "

Defines a filter by which to filter the results.

" + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#IngestionJobFilterAttribute": { - "type": "enum", + "com.amazonaws.bedrockagent#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.bedrockagent#Dimensions": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 4096 + } + } + }, + "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBase": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates a knowledge base from an agent.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseRequest": { + "type": "structure", "members": { - "STATUS": { - "target": "smithy.api#Unit", + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#enumValue": "STATUS" + "smithy.api#documentation": "

The unique identifier of the agent from which to disassociate the knowledge base.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#DraftVersion", + "traits": { + "smithy.api#documentation": "

The version of the agent from which to disassociate the knowledge base.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to disassociate.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#IngestionJobFilterOperator": { - "type": "enum", + "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBaseResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#DraftVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 5 + }, + "smithy.api#pattern": "^DRAFT$" + } + }, + "com.amazonaws.bedrockagent#EmbeddingModelConfiguration": { + "type": "structure", "members": { - "EQ": { - "target": "smithy.api#Unit", + "bedrockEmbeddingModelConfiguration": { + "target": "com.amazonaws.bedrockagent#BedrockEmbeddingModelConfiguration", "traits": { - "smithy.api#enumValue": "EQ" + "smithy.api#documentation": "

The vector configuration details on the Bedrock embeddings model.

" } } + }, + "traits": { + "smithy.api#documentation": "

The configuration details for the embeddings model.

" } }, - "com.amazonaws.bedrockagent#IngestionJobFilterValue": { + "com.amazonaws.bedrockagent#EnabledMemoryTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#MemoryType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.bedrockagent#FailureReason": { "type": "string", "traits": { "smithy.api#length": { - "min": 0, - "max": 100 - }, - "smithy.api#pattern": "^.*$" + "max": 2048 + } } }, - "com.amazonaws.bedrockagent#IngestionJobFilterValues": { + "com.amazonaws.bedrockagent#FailureReasons": { "type": "list", "member": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilterValue" + "target": "com.amazonaws.bedrockagent#FailureReason" }, "traits": { "smithy.api#length": { - "min": 0, - "max": 10 + "max": 2048 } } }, - "com.amazonaws.bedrockagent#IngestionJobFilters": { + "com.amazonaws.bedrockagent#FieldName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#FilterList": { "type": "list", "member": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilter" + "target": "com.amazonaws.bedrockagent#FilterPattern" }, "traits": { "smithy.api#length": { "min": 1, - "max": 1 - } + "max": 25 + }, + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#IngestionJobResource": { - "type": "resource", - "operations": [ - { - "target": "com.amazonaws.bedrockagent#GetIngestionJob" + "com.amazonaws.bedrockagent#FilterPattern": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 }, - { - "target": "com.amazonaws.bedrockagent#ListIngestionJobs" + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#FilteredObjectType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 }, - { - "target": "com.amazonaws.bedrockagent#StartIngestionJob" - } - ] + "smithy.api#sensitive": {} + } }, - "com.amazonaws.bedrockagent#IngestionJobSortBy": { + "com.amazonaws.bedrockagent#FixedSizeChunkingConfiguration": { "type": "structure", "members": { - "attribute": { - "target": "com.amazonaws.bedrockagent#IngestionJobSortByAttribute", + "maxTokens": { + "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The attribute by which to sort the results.

", + "smithy.api#documentation": "

The maximum number of tokens to include in a chunk.

", + "smithy.api#range": { + "min": 1 + }, "smithy.api#required": {} } }, - "order": { - "target": "com.amazonaws.bedrockagent#SortOrder", + "overlapPercentage": { + "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The order by which to sort the results.

", + "smithy.api#documentation": "

The percentage of overlap between adjacent chunks of a data source.

", + "smithy.api#range": { + "min": 1, + "max": 99 + }, "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Parameters by which to sort the results.

" + "smithy.api#documentation": "

Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field.
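A minimal sketch of this shape as built through the Soto-generated Swift types (the memberwise initializer is assumed from Soto's usual codegen, and the values are only examples):

```swift
import SotoBedrockAgent

// Minimal sketch: chunks of at most 300 tokens with 20% overlap between
// adjacent chunks. Both members are required by the model above.
let chunking = BedrockAgent.FixedSizeChunkingConfiguration(
    maxTokens: 300,
    overlapPercentage: 20
)
```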

" } }, - "com.amazonaws.bedrockagent#IngestionJobSortByAttribute": { - "type": "enum", - "members": { - "STATUS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STATUS" - } + "com.amazonaws.bedrockagent#FlowAliasArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/(TSTALIASID|[0-9a-zA-Z]{10})$" + } + }, + "com.amazonaws.bedrockagent#FlowAliasId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(TSTALIASID|[0-9a-zA-Z]{10})$" + } + }, + "com.amazonaws.bedrockagent#FlowAliasIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(TSTALIASID|[0-9a-zA-Z]{10})$" + } + }, + "com.amazonaws.bedrockagent#FlowAliasResource": { + "type": "resource", + "identifiers": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier" }, - "STARTED_AT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STARTED_AT" - } + "aliasIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowAliasIdentifier" + } + }, + "create": { + "target": "com.amazonaws.bedrockagent#CreateFlowAlias" + }, + "read": { + "target": "com.amazonaws.bedrockagent#GetFlowAlias" + }, + "update": { + "target": "com.amazonaws.bedrockagent#UpdateFlowAlias" + }, + "delete": { + "target": "com.amazonaws.bedrockagent#DeleteFlowAlias" + }, + "list": { + "target": "com.amazonaws.bedrockagent#ListFlowAliases" + }, + "traits": { + "aws.api#arn": { + "template": "flow/{flowIdentifier}/alias/{aliasIdentifier}" + }, + "aws.cloudformation#cfnResource": { + "name": "FlowAlias" } } }, - "com.amazonaws.bedrockagent#IngestionJobStatistics": { - "type": "structure", - "members": { - "numberOfDocumentsScanned": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The total number of source documents that were scanned. Includes new, updated, and unchanged documents.

" - } - }, - "numberOfMetadataDocumentsScanned": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The total number of metadata files that were scanned. Includes new, updated, and unchanged files.

" - } - }, - "numberOfNewDocumentsIndexed": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of new source documents in the data source that were successfully indexed.

" - } - }, - "numberOfModifiedDocumentsIndexed": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of modified source documents in the data source that were successfully indexed.

" - } - }, - "numberOfMetadataDocumentsModified": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of metadata files that were updated or deleted.

" - } - }, - "numberOfDocumentsDeleted": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of source documents that were deleted.

" - } - }, - "numberOfDocumentsFailed": { - "target": "smithy.api#PrimitiveLong", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of source documents that failed to be ingested.

" - } - } + "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfigurationListItem" }, "traits": { - "smithy.api#documentation": "

Contains the statistics for the ingestion job.

" + "smithy.api#length": { + "min": 1, + "max": 1 + } } }, - "com.amazonaws.bedrockagent#IngestionJobStatus": { - "type": "enum", + "com.amazonaws.bedrockagent#FlowAliasRoutingConfigurationListItem": { + "type": "structure", "members": { - "STARTING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STARTING" - } - }, - "IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN_PROGRESS" - } - }, - "COMPLETE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "COMPLETE" - } - }, - "FAILED": { - "target": "smithy.api#Unit", + "flowVersion": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#enumValue": "FAILED" + "smithy.api#documentation": "

The version that the alias maps to.

" } } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a version that the alias maps to.

" } }, - "com.amazonaws.bedrockagent#IngestionJobSummaries": { + "com.amazonaws.bedrockagent#FlowAliasSummaries": { "type": "list", "member": { - "target": "com.amazonaws.bedrockagent#IngestionJobSummary" + "target": "com.amazonaws.bedrockagent#FlowAliasSummary" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } } }, - "com.amazonaws.bedrockagent#IngestionJobSummary": { + "com.amazonaws.bedrockagent#FlowAliasSummary": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is added.

", + "smithy.api#documentation": "

The name of the alias.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "description": { + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", + "smithy.api#documentation": "

A description of the alias.

" + } + }, + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", + "traits": { + "smithy.api#documentation": "

A list of configurations about the versions that the alias maps to. Currently, you can only specify one.

", "smithy.api#required": {} } }, - "ingestionJobId": { - "target": "com.amazonaws.bedrockagent#Id", + "flowId": { + "target": "com.amazonaws.bedrockagent#FlowId", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + "id": { + "target": "com.amazonaws.bedrockagent#FlowAliasId", "traits": { - "smithy.api#documentation": "

The description of the ingestion job.

" + "smithy.api#documentation": "

The unique identifier of the alias of the flow.

", + "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.bedrockagent#IngestionJobStatus", + "arn": { + "target": "com.amazonaws.bedrockagent#FlowAliasArn", "traits": { - "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow alias.

", "smithy.api#required": {} } }, - "startedAt": { + "createdAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was started.

", + "smithy.api#documentation": "

The time at which the alias was created.

", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", + "smithy.api#documentation": "

The time at which the alias was last updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about an alias of a flow.


This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#FlowArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}$" + } + }, + "com.amazonaws.bedrockagent#FlowCondition": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowConditionName", + "traits": { + "smithy.api#documentation": "

A name for the condition that you can reference.

", "smithy.api#required": {} } }, - "statistics": { - "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", + "expression": { + "target": "com.amazonaws.bedrockagent#FlowConditionExpression", "traits": { - "smithy.api#documentation": "

Contains statistics for the ingestion job.

" + "smithy.api#documentation": "

Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in Node types in prompt flows.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about an ingestion job.

" + "smithy.api#documentation": "

Defines a condition in the condition node.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#Instruction": { + "com.amazonaws.bedrockagent#FlowConditionExpression": { "type": "string", "traits": { "smithy.api#length": { - "min": 40, - "max": 4000 + "min": 1, + "max": 64 }, "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#InternalServerException": { + "com.amazonaws.bedrockagent#FlowConditionName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + } + }, + "com.amazonaws.bedrockagent#FlowConditionalConnectionConfiguration": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.bedrockagent#NonBlankString" + "condition": { + "target": "com.amazonaws.bedrockagent#FlowConditionName", + "traits": { + "smithy.api#documentation": "

The condition that triggers this connection. For more information about how to write conditions, see the Condition node type in the Node types topic in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} + } } }, "traits": { - "smithy.api#documentation": "

An internal server error occurred. Retry your request.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 + "smithy.api#documentation": "

The configuration of a connection between a condition node and another node.

" } }, - "com.amazonaws.bedrockagent#KmsKeyArn": { - "type": "string", + "com.amazonaws.bedrockagent#FlowConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowCondition" + }, "traits": { "smithy.api#length": { "min": 1, - "max": 2048 + "max": 5 }, - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#KnowledgeBase": { + "com.amazonaws.bedrockagent#FlowConnection": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "type": { + "target": "com.amazonaws.bedrockagent#FlowConnectionType", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base.

", + "smithy.api#documentation": "

Whether the source node that the connection begins from is a condition node (Conditional) or not (Data).

", "smithy.api#required": {} } }, "name": { - "target": "com.amazonaws.bedrockagent#Name", - "traits": { - "smithy.api#documentation": "

The name of the knowledge base.

", - "smithy.api#required": {} - } - }, - "knowledgeBaseArn": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseArn", + "target": "com.amazonaws.bedrockagent#FlowConnectionName", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#documentation": "

A name for the connection that you can reference.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", - "traits": { - "smithy.api#documentation": "

The description of the knowledge base.

" - } - }, - "roleArn": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", + "source": { + "target": "com.amazonaws.bedrockagent#FlowNodeName", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", + "smithy.api#documentation": "

The node that the connection starts at.

", "smithy.api#required": {} } }, - "knowledgeBaseConfiguration": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", + "target": { + "target": "com.amazonaws.bedrockagent#FlowNodeName", "traits": { - "smithy.api#documentation": "

Contains details about the embeddings configuration of the knowledge base.

", + "smithy.api#documentation": "

The node that the connection ends at.

", "smithy.api#required": {} } }, - "storageConfiguration": { - "target": "com.amazonaws.bedrockagent#StorageConfiguration", + "configuration": { + "target": "com.amazonaws.bedrockagent#FlowConnectionConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration of the connection.

" } - }, - "status": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a connection between two nodes in the flow.

" + } + }, + "com.amazonaws.bedrockagent#FlowConnectionConfiguration": { + "type": "union", + "members": { + "data": { + "target": "com.amazonaws.bedrockagent#FlowDataConnectionConfiguration", "traits": { - "smithy.api#documentation": "

The status of the knowledge base. The following statuses are possible: CREATING – The knowledge base is being created. ACTIVE – The knowledge base is ready to be queried. DELETING – The knowledge base is being deleted. UPDATING – The knowledge base is being updated. FAILED – The knowledge base API operation failed.
", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration of a connection originating from a node that isn't a Condition node.

" } }, - "createdAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "conditional": { + "target": "com.amazonaws.bedrockagent#FlowConditionalConnectionConfiguration", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration of a connection originating from a Condition node.

" } - }, - "updatedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the connection.

" + } + }, + "com.amazonaws.bedrockagent#FlowConnectionName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,100}$" + } + }, + "com.amazonaws.bedrockagent#FlowConnectionType": { + "type": "enum", + "members": { + "DATA": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", - "smithy.api#required": {} + "smithy.api#enumValue": "Data" } }, - "failureReasons": { - "target": "com.amazonaws.bedrockagent#FailureReasons", + "CONDITIONAL": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

A list of reasons that the API operation on the knowledge base failed.

" + "smithy.api#enumValue": "Conditional" } } - }, - "traits": { - "smithy.api#documentation": "

Contains information about a knowledge base.

" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseArn": { - "type": "string", + "com.amazonaws.bedrockagent#FlowConnections": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowConnection" + }, "traits": { "smithy.api#length": { - "max": 128 - }, - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:[0-9]{12}:knowledge-base/[0-9a-zA-Z]+$" + "max": 20 + } } }, - "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration": { + "com.amazonaws.bedrockagent#FlowDataConnectionConfiguration": { "type": "structure", "members": { - "type": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseType", + "sourceOutput": { + "target": "com.amazonaws.bedrockagent#FlowNodeOutputName", "traits": { - "smithy.api#documentation": "

The type of data that the data source is converted into for the knowledge base.

", + "smithy.api#documentation": "

The name of the output in the source node that the connection begins from.

", "smithy.api#required": {} } }, - "vectorKnowledgeBaseConfiguration": { - "target": "com.amazonaws.bedrockagent#VectorKnowledgeBaseConfiguration", + "targetInput": { + "target": "com.amazonaws.bedrockagent#FlowNodeInputName", "traits": { - "smithy.api#documentation": "

Contains details about the embeddings model that's used to convert the data source.

" + "smithy.api#documentation": "

The name of the input in the target node that the connection ends at.

", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains details about the embeddings configuration of the knowledge base.

" + "smithy.api#documentation": "

The configuration of a connection originating from a node that isn't a Condition node.

" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseResource": { - "type": "resource", - "operations": [ - { - "target": "com.amazonaws.bedrockagent#AssociateAgentKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#CreateKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#GetKnowledgeBase" - }, - { - "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBases" - }, - { - "target": "com.amazonaws.bedrockagent#ListKnowledgeBases" - }, - { - "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBase" + "com.amazonaws.bedrockagent#FlowDefinition": { + "type": "structure", + "members": { + "nodes": { + "target": "com.amazonaws.bedrockagent#FlowNodes", + "traits": { + "smithy.api#documentation": "

An array of node definitions in the flow.

" + } }, - { - "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBase" + "connections": { + "target": "com.amazonaws.bedrockagent#FlowConnections", + "traits": { + "smithy.api#documentation": "

An array of connection definitions in the flow.

" + } } - ] + }, + "traits": { + "smithy.api#documentation": "

The definition of the nodes and connections between nodes in the flow.
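To give a feel for how these pieces compose, here is a rough sketch of one connection expressed with the Soto-generated shapes, ready to be placed in FlowDefinition.connections alongside the node definitions; the enum-style union case and the initializers are assumptions based on how Soto renders Smithy unions and structures, and the node and field names are placeholders.

```swift
import SotoBedrockAgent

// Minimal sketch: a data connection carrying the "document" output of an
// input node into the "codeHookInput" input of a downstream node.
let connection = BedrockAgent.FlowConnection(
    configuration: .data(.init(sourceOutput: "document", targetInput: "codeHookInput")),
    name: "FlowInput_to_Prompt",
    source: "FlowInput",
    target: "Prompt_1",
    type: .data
)
```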

" + } }, - "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn": { + "com.amazonaws.bedrockagent#FlowDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.bedrockagent#FlowExecutionRoleArn": { "type": "string", "traits": { "smithy.api#length": { "max": 2048 }, - "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?.+$" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseState": { - "type": "enum", + "com.amazonaws.bedrockagent#FlowId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-zA-Z]{10}$" + } + }, + "com.amazonaws.bedrockagent#FlowIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10})|([0-9a-zA-Z]{10})$" + } + }, + "com.amazonaws.bedrockagent#FlowName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" + } + }, + "com.amazonaws.bedrockagent#FlowNode": { + "type": "structure", "members": { - "ENABLED": { - "target": "smithy.api#Unit", + "name": { + "target": "com.amazonaws.bedrockagent#FlowNodeName", "traits": { - "smithy.api#enumValue": "ENABLED" + "smithy.api#documentation": "

A name for the node.

", + "smithy.api#required": {} } }, - "DISABLED": { - "target": "smithy.api#Unit", + "type": { + "target": "com.amazonaws.bedrockagent#FlowNodeType", "traits": { - "smithy.api#enumValue": "DISABLED" + "smithy.api#documentation": "

The type of node. This value must match the name of the key that you provide in the configuration you provide in the FlowNodeConfiguration field.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.bedrockagent#FlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for the node.

" + } + }, + "inputs": { + "target": "com.amazonaws.bedrockagent#FlowNodeInputs", + "traits": { + "smithy.api#documentation": "

An array of objects, each of which contains information about an input into the node.

" + } + }, + "outputs": { + "target": "com.amazonaws.bedrockagent#FlowNodeOutputs", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about an output from the node.

" } } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations about a node in the flow.

" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseStatus": { - "type": "enum", + "com.amazonaws.bedrockagent#FlowNodeConfiguration": { + "type": "union", "members": { - "CREATING": { - "target": "smithy.api#Unit", + "input": { + "target": "com.amazonaws.bedrockagent#InputFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "CREATING" + "smithy.api#documentation": "

Contains configurations for an input flow node in your flow. This is the first node in the flow; inputs can't be specified for this node.

" } }, - "ACTIVE": { - "target": "smithy.api#Unit", + "output": { + "target": "com.amazonaws.bedrockagent#OutputFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "ACTIVE" + "smithy.api#documentation": "

Contains configurations for an output flow node in your flow. This is the last node in the flow; outputs can't be specified for this node.

" } }, - "DELETING": { - "target": "smithy.api#Unit", + "knowledgeBase": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "DELETING" + "smithy.api#documentation": "

Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.

" } }, - "UPDATING": { - "target": "smithy.api#Unit", + "condition": { + "target": "com.amazonaws.bedrockagent#ConditionFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "UPDATING" + "smithy.api#documentation": "

Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.

" } }, - "FAILED": { - "target": "smithy.api#Unit", + "lex": { + "target": "com.amazonaws.bedrockagent#LexFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "FAILED" + "smithy.api#documentation": "

Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.

" } }, - "DELETE_UNSUCCESSFUL": { - "target": "smithy.api#Unit", + "prompt": { + "target": "com.amazonaws.bedrockagent#PromptFlowNodeConfiguration", "traits": { - "smithy.api#enumValue": "DELETE_UNSUCCESSFUL" + "smithy.api#documentation": "

Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node.

" + } + }, + "lambdaFunction": { + "target": "com.amazonaws.bedrockagent#LambdaFunctionFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for a Lambda function node in your flow. Invokes a Lambda function.

" + } + }, + "storage": { + "target": "com.amazonaws.bedrockagent#StorageFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location.

" + } + }, + "agent": { + "target": "com.amazonaws.bedrockagent#AgentFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response.

" + } + }, + "retrieval": { + "target": "com.amazonaws.bedrockagent#RetrievalFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output.

" + } + }, + "iterator": { + "target": "com.amazonaws.bedrockagent#IteratorFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.


The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.

" + } + }, + "collector": { + "target": "com.amazonaws.bedrockagent#CollectorFlowNodeConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs.

" } } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a node in your flow. For more information, see Node types in Amazon Bedrock in the Amazon Bedrock User Guide.
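As a hedged illustration on the Soto side (not part of the model): a minimal Swift sketch of two bare nodes, assuming the memberwise initializers Soto normally generates; the node names are placeholders and the optional configuration, inputs, and outputs members are omitted.

import SotoBedrockAgent

// Illustrative only: the node type must agree with the key used in FlowNodeConfiguration.
let entry = BedrockAgent.FlowNode(name: "FlowInput", type: .input)
let exit = BedrockAgent.FlowNode(name: "FlowOutput", type: .output)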

" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseStorageType": { + "com.amazonaws.bedrockagent#FlowNodeIODataType": { "type": "enum", "members": { - "OPENSEARCH_SERVERLESS": { + "STRING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "OPENSEARCH_SERVERLESS" + "smithy.api#enumValue": "String" } }, - "PINECONE": { + "NUMBER": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "PINECONE" + "smithy.api#enumValue": "Number" } }, - "REDIS_ENTERPRISE_CLOUD": { + "BOOLEAN": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "REDIS_ENTERPRISE_CLOUD" + "smithy.api#enumValue": "Boolean" } }, - "RDS": { + "OBJECT": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RDS" + "smithy.api#enumValue": "Object" } }, - "MONGO_DB_ATLAS": { + "ARRAY": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MONGO_DB_ATLAS" + "smithy.api#enumValue": "Array" } } } }, - "com.amazonaws.bedrockagent#KnowledgeBaseSummaries": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseSummary" - } - }, - "com.amazonaws.bedrockagent#KnowledgeBaseSummary": { + "com.amazonaws.bedrockagent#FlowNodeInput": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#FlowNodeInputName", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base.

", + "smithy.api#documentation": "

A name for the input that you can reference.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.bedrockagent#Name", + "type": { + "target": "com.amazonaws.bedrockagent#FlowNodeIODataType", "traits": { - "smithy.api#documentation": "

The name of the knowledge base.

", + "smithy.api#documentation": "

The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + "expression": { + "target": "com.amazonaws.bedrockagent#FlowNodeInputExpression", "traits": { - "smithy.api#documentation": "

The description of the knowledge base.

" + "smithy.api#documentation": "

An expression that formats the input for the node. For an explanation of how to create expressions, see Expressions in Prompt flows in Amazon Bedrock.

", + "smithy.api#required": {} } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for an input to a node.
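A hedged Swift sketch of this shape as Soto would surface it, assuming the generated memberwise initializer; the name is a placeholder and "$.data" is only an assumed example of the expression syntax referenced above.

import SotoBedrockAgent

// Placeholder input: routes the upstream document into this node.
let nodeInput = BedrockAgent.FlowNodeInput(
    expression: "$.data",   // assumed example expression; 1-64 characters per the trait below
    name: "question",       // must match the FlowNodeInputName pattern
    type: .string
)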

" + } + }, + "com.amazonaws.bedrockagent#FlowNodeInputExpression": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 }, - "status": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#FlowNodeInputName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + } + }, + "com.amazonaws.bedrockagent#FlowNodeInputs": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowNodeInput" + }, + "traits": { + "smithy.api#length": { + "max": 5 + } + } + }, + "com.amazonaws.bedrockagent#FlowNodeName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" + } + }, + "com.amazonaws.bedrockagent#FlowNodeOutput": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowNodeOutputName", "traits": { - "smithy.api#documentation": "

The status of the knowledge base.

", + "smithy.api#documentation": "

A name for the output that you can reference.

", "smithy.api#required": {} } }, - "updatedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "type": { + "target": "com.amazonaws.bedrockagent#FlowNodeIODataType", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", + "smithy.api#documentation": "

The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains details about a knowledge base.

" + "smithy.api#documentation": "

Contains configurations for an output from a node.

" } }, - "com.amazonaws.bedrockagent#KnowledgeBaseType": { - "type": "enum", - "members": { - "VECTOR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VECTOR" - } - } + "com.amazonaws.bedrockagent#FlowNodeOutputName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]([_]?[0-9a-zA-Z]){1,50}$" } }, - "com.amazonaws.bedrockagent#LambdaArn": { - "type": "string", + "com.amazonaws.bedrockagent#FlowNodeOutputs": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowNodeOutput" + }, "traits": { "smithy.api#length": { - "max": 2048 - }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" + "max": 5 + } } }, - "com.amazonaws.bedrockagent#ListAgentActionGroups": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListAgentActionGroupsRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListAgentActionGroupsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" + "com.amazonaws.bedrockagent#FlowNodeType": { + "type": "enum", + "members": { + "INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Input" + } }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "OUTPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Output" + } }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + "KNOWLEDGE_BASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "KnowledgeBase" + } }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "CONDITION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Condition" + } + }, + "LEX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Lex" + } + }, + "PROMPT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Prompt" + } + }, + "LAMBDA_FUNCTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LambdaFunction" + } + }, + "STORAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Storage" + } + }, + "AGENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Agent" + } + }, + "RETRIEVAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Retrieval" + } + }, + "ITERATOR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Iterator" + } + }, + "COLLECTOR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Collector" + } + } + } + }, + "com.amazonaws.bedrockagent#FlowNodes": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowNode" + }, + "traits": { + "smithy.api#length": { + "max": 20 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#FlowResource": { + "type": "resource", + "identifiers": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier" + } + }, + "create": { + "target": "com.amazonaws.bedrockagent#CreateFlow" + }, + "read": { + "target": "com.amazonaws.bedrockagent#GetFlow" + }, + "update": { + "target": "com.amazonaws.bedrockagent#UpdateFlow" + }, + "delete": { + "target": "com.amazonaws.bedrockagent#DeleteFlow" + }, + "list": { + "target": "com.amazonaws.bedrockagent#ListFlows" + }, + "operations": [ + { + "target": "com.amazonaws.bedrockagent#PrepareFlow" + } + ], + "resources": [ + { + "target": 
"com.amazonaws.bedrockagent#FlowAliasResource" + }, + { + "target": "com.amazonaws.bedrockagent#FlowVersionResource" + } + ], + "traits": { + "aws.cloudformation#cfnResource": { + "name": "Flow" + } + } + }, + "com.amazonaws.bedrockagent#FlowStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "PREPARED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Prepared" + } + }, + "PREPARING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Preparing" + } + }, + "NOT_PREPARED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotPrepared" + } + } + } + }, + "com.amazonaws.bedrockagent#FlowSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowSummary" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#FlowSummary": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", + "traits": { + "smithy.api#documentation": "

The name of the flow.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", + "traits": { + "smithy.api#documentation": "

A description of the flow.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow. The following statuses are possible:

  • NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version.
  • Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing.
  • Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing.
  • Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was last updated.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#DraftVersion", + "traits": { + "smithy.api#documentation": "

The latest version of the flow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the definition of a flow.

" + } + }, + "com.amazonaws.bedrockagent#FlowValidation": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagent#NonBlankString", + "traits": { + "smithy.api#documentation": "

A message describing the validation error.

", + "smithy.api#required": {} + } + }, + "severity": { + "target": "com.amazonaws.bedrockagent#FlowValidationSeverity", + "traits": { + "smithy.api#documentation": "

The severity of the issue described in the message.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about validation of the flow.


This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#FlowValidationSeverity": { + "type": "enum", + "members": { + "WARNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Warning" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Error" + } + } + } + }, + "com.amazonaws.bedrockagent#FlowValidations": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowValidation" + }, + "traits": { + "smithy.api#length": { + "max": 100 + } + } + }, + "com.amazonaws.bedrockagent#FlowVersionResource": { + "type": "resource", + "identifiers": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier" + }, + "flowVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion" + } + }, + "create": { + "target": "com.amazonaws.bedrockagent#CreateFlowVersion" + }, + "read": { + "target": "com.amazonaws.bedrockagent#GetFlowVersion" + }, + "delete": { + "target": "com.amazonaws.bedrockagent#DeleteFlowVersion" + }, + "list": { + "target": "com.amazonaws.bedrockagent#ListFlowVersions" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "FlowVersion" + } + } + }, + "com.amazonaws.bedrockagent#FlowVersionSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#FlowVersionSummary" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#FlowVersionSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow that the version belongs to.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow version was created.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version of the flow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the flow version.


This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#Function": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

A name for the function.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#FunctionDescription", + "traits": { + "smithy.api#documentation": "

A description of the function and its purpose.

" + } + }, + "parameters": { + "target": "com.amazonaws.bedrockagent#ParameterMap", + "traits": { + "smithy.api#documentation": "

The parameters that the agent elicits from the user to fulfill the function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines parameters that the agent needs to elicit from the user to complete the function. Corresponds to an action in an action group.
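A small Swift sketch of this shape from the Soto side, assuming the generated memberwise initializer; the function name and description are placeholders, and parameters is left out because it is optional in the model.

import SotoBedrockAgent

// Hypothetical action-group function with no parameters.
let lookupOrder = BedrockAgent.Function(
    description: "Looks up the status of a customer order.",
    name: "lookup_order_status"
)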


This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#FunctionDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1200 + } + } + }, + "com.amazonaws.bedrockagent#FunctionSchema": { + "type": "union", + "members": { + "functions": { + "target": "com.amazonaws.bedrockagent#Functions", + "traits": { + "smithy.api#documentation": "

A list of functions that each define an action in the action group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines functions that each define parameters that the agent needs to elicit from the user. Each function represents an action in an action group.


This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#Functions": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#Function" + } + }, + "com.amazonaws.bedrockagent#GetAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetAgentRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about an agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetAgentActionGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetAgentActionGroupRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetAgentActionGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about an action group for an agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetAgentActionGroupRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent that the action group belongs to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the agent that the action group belongs to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "actionGroupId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the action group for which to get information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentActionGroupResponse": { + "type": "structure", + "members": { + "agentActionGroup": { + "target": "com.amazonaws.bedrockagent#AgentActionGroup", + "traits": { + "smithy.api#documentation": "

Contains details about the action group.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentAlias": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetAgentAliasRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetAgentAliasResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about an alias of an agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetAgentAliasRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent that the alias belongs to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentAliasId": { + "target": "com.amazonaws.bedrockagent#AgentAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias for which to get information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentAliasResponse": { + "type": "structure", + "members": { + "agentAlias": { + "target": "com.amazonaws.bedrockagent#AgentAlias", + "traits": { + "smithy.api#documentation": "

Contains information about the alias.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentKnowledgeBase": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about a knowledge base associated with an agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent with which the knowledge base is associated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the agent with which the knowledge base is associated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base associated with the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentKnowledgeBaseResponse": { + "type": "structure", + "members": { + "agentKnowledgeBase": { + "target": "com.amazonaws.bedrockagent#AgentKnowledgeBase", + "traits": { + "smithy.api#documentation": "

Contains details about a knowledge base attached to an agent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentResponse": { + "type": "structure", + "members": { + "agent": { + "target": "com.amazonaws.bedrockagent#Agent", + "traits": { + "smithy.api#documentation": "

Contains details about the agent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetAgentVersionRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetAgentVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets details about a version of an agent.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetAgentVersionRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetAgentVersionResponse": { + "type": "structure", + "members": { + "agentVersion": { + "target": "com.amazonaws.bedrockagent#AgentVersion", + "traits": { + "smithy.api#documentation": "

Contains details about the version of the agent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetDataSource": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetDataSourceRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetDataSourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about a data source.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetDataSourceRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base that the data source was added to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetDataSourceResponse": { + "type": "structure", + "members": { + "dataSource": { + "target": "com.amazonaws.bedrockagent#DataSource", + "traits": { + "smithy.api#documentation": "

Contains details about the data source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetFlowRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about a flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide.
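A minimal Swift sketch of calling this operation through Soto, assuming the Soto 7 default client initializer and the generated getFlow method; the flow identifier is a placeholder ten-character ID and the region is arbitrary.

import SotoBedrockAgent

let client = AWSClient()                                   // default credential chain
let bedrockAgent = BedrockAgent(client: client, region: .useast1)

let flow = try await bedrockAgent.getFlow(.init(flowIdentifier: "EXAMPLE123"))
print("\(flow.name): status \(flow.status.rawValue), version \(flow.version)")

try await client.shutdown()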

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/{flowIdentifier}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetFlowAlias": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetFlowAliasRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetFlowAliasResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about an alias of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/{flowIdentifier}/aliases/{aliasIdentifier}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetFlowAliasRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow that the alias belongs to.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aliasIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowAliasIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias for which to retrieve information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetFlowAliasResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

The name of the flow alias.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the flow alias.

" + } + }, + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", + "traits": { + "smithy.api#documentation": "

Contains information about the version that the alias is mapped to.

", + "smithy.api#required": {} + } + }, + "flowId": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow that the alias belongs to.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowAliasArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow alias.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the alias was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow alias was last updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetFlowRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetFlowResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", + "traits": { + "smithy.api#documentation": "

The name of the flow.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", + "traits": { + "smithy.api#documentation": "

The description of the flow.

" + } + }, + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} + } + }, + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow. The following statuses are possible:

  • NotPrepared – The flow has been created or updated, but hasn't been prepared. If you just created the flow, you can't test it. If you updated the flow, the DRAFT version won't contain the latest changes for testing. Send a PrepareFlow request to package the latest changes into the DRAFT version.
  • Preparing – The flow is being prepared so that the DRAFT version contains the latest changes for testing.
  • Prepared – The flow is prepared and the DRAFT version contains the latest changes for testing.
  • Failed – The last API operation that you invoked on the flow failed. Send a GetFlow request and check the error message in the validations field.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was last updated.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#DraftVersion", + "traits": { + "smithy.api#documentation": "

The version of the flow for which information was retrieved.

", + "smithy.api#required": {} + } + }, + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", + "traits": { + "smithy.api#documentation": "

The definition of the nodes and connections between the nodes in the flow.

" + } + }, + "validations": { + "target": "com.amazonaws.bedrockagent#FlowValidations", + "traits": { + "smithy.api#documentation": "

A list of validation error messages related to the last failed operation on the flow.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetFlowVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetFlowVersionRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetFlowVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about a version of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/{flowIdentifier}/versions/{flowVersion}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetFlowVersionRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow for which to get information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "flowVersion": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version of the flow for which to get information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetFlowVersionResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", + "traits": { + "smithy.api#documentation": "

The name of the flow version.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", + "traits": { + "smithy.api#documentation": "

The description of the flow.

" + } + }, + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} + } + }, + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that the version of the flow is encrypted with.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow was created.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#NumericalVersion", + "traits": { + "smithy.api#documentation": "

The version of the flow for which information was retrieved.

", + "smithy.api#required": {} + } + }, + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", + "traits": { + "smithy.api#documentation": "

The definition of the nodes and connections between nodes in the flow.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetIngestionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetIngestionJobRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetIngestionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about an ingestion job, in which a data source is added to a knowledge base.
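A short Soto sketch for this operation, assuming the generated request initializer; the three identifiers are placeholder ten-character IDs matching the Id pattern in this model.

import SotoBedrockAgent

// Fetch one ingestion job by its knowledge base, data source, and job identifiers.
func fetchIngestionJob(_ bedrockAgent: BedrockAgent) async throws -> BedrockAgent.IngestionJob {
    let response = try await bedrockAgent.getIngestionJob(.init(
        dataSourceId: "DATASRC001",
        ingestionJobId: "INGESTJOB1",
        knowledgeBaseId: "KNOWLEDGE1"
    ))
    return response.ingestionJob
}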

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/{ingestionJobId}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetIngestionJobRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the ingestion job applies.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ingestionJobId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetIngestionJobResponse": { + "type": "structure", + "members": { + "ingestionJob": { + "target": "com.amazonaws.bedrockagent#IngestionJob", + "traits": { + "smithy.api#documentation": "

Contains details about the ingestion job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetKnowledgeBase": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetKnowledgeBaseRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetKnowledgeBaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information about a knowledge base.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/knowledgebases/{knowledgeBaseId}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetKnowledgeBaseRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base for which to get information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetKnowledgeBaseResponse": { + "type": "structure", + "members": { + "knowledgeBase": { + "target": "com.amazonaws.bedrockagent#KnowledgeBase", + "traits": { + "smithy.api#documentation": "

Contains details about the knowledge base.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GetPrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#GetPromptRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#GetPromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about a prompt or a version of it. For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide.
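A hedged Soto sketch of this call; the prompt identifier is a placeholder, and promptVersion can be omitted to read the working draft.

import SotoBedrockAgent

func describePrompt(_ bedrockAgent: BedrockAgent) async throws {
    let prompt = try await bedrockAgent.getPrompt(.init(
        promptIdentifier: "PROMPT0001",
        promptVersion: "1"
    ))
    print("\(prompt.name) v\(prompt.version), default variant: \(prompt.defaultVariant ?? "none")")
}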

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/prompts/{promptIdentifier}/" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#GetPromptRequest": { + "type": "structure", + "members": { + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "promptVersion": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the prompt about which you want to retrieve information.

", + "smithy.api#httpQuery": "promptVersion" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#GetPromptResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", + "traits": { + "smithy.api#documentation": "

The name of the prompt.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", + "traits": { + "smithy.api#documentation": "

The description of the prompt.

" + } + }, + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.

" + } + }, + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + } + }, + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#PromptArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the prompt.

", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the prompt.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the prompt was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the prompt was last updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#GuardrailConfiguration": { + "type": "structure", + "members": { + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockagent#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the guardrail.

" + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockagent#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The version of the guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the guardrail associated with an agent.
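A one-line Swift sketch of this shape, assuming the generated initializer; the guardrail identifier is a placeholder, and DRAFT is one of the version values permitted by the pattern that follows.

import SotoBedrockAgent

// Placeholder guardrail reference attached to an agent.
let guardrail = BedrockAgent.GuardrailConfiguration(
    guardrailIdentifier: "abcd1234efgh",
    guardrailVersion: "DRAFT"
)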

" + } + }, + "com.amazonaws.bedrockagent#GuardrailIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + } + }, + "com.amazonaws.bedrockagent#GuardrailVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(([0-9]{1,8})|(DRAFT))$" + } + }, + "com.amazonaws.bedrockagent#HierarchicalChunkingConfiguration": { + "type": "structure", + "members": { + "levelConfigurations": { + "target": "com.amazonaws.bedrockagent#HierarchicalChunkingLevelConfigurations", + "traits": { + "smithy.api#documentation": "

Token settings for each layer.

", + "smithy.api#required": {} + } + }, + "overlapTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of tokens to repeat across chunks in the same layer.

", + "smithy.api#range": { + "min": 1 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.

You configure the number of tokens to overlap, or repeat across adjacent chunks. For example, if you set overlap tokens to 60, the last 60 tokens in the first chunk are also included at the beginning of the second chunk. For each layer, you must also configure the maximum number of tokens in a chunk.
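A Swift sketch of the two-layer setup this shape requires (exactly two level configurations), reusing the 60-token overlap from the example above; the token counts are illustrative and the labels assume the generated memberwise initializer.

import SotoBedrockAgent

let chunking = BedrockAgent.HierarchicalChunkingConfiguration(
    levelConfigurations: [
        .init(maxTokens: 1500),   // first layer: parent chunks
        .init(maxTokens: 300)     // second layer: child chunks
    ],
    overlapTokens: 60             // tokens repeated across adjacent chunks
)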

" + } + }, + "com.amazonaws.bedrockagent#HierarchicalChunkingLevelConfiguration": { + "type": "structure", + "members": { + "maxTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens that a chunk can contain in this layer.

", + "smithy.api#range": { + "min": 1, + "max": 8192 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Token settings for a layer in a hierarchical chunking configuration.

" + } + }, + "com.amazonaws.bedrockagent#HierarchicalChunkingLevelConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#HierarchicalChunkingLevelConfiguration" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2 + } + } + }, + "com.amazonaws.bedrockagent#HttpsUrl": { + "type": "string", + "traits": { + "smithy.api#pattern": "^https://[A-Za-z0-9][^\\s]*$" + } + }, + "com.amazonaws.bedrockagent#Id": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-zA-Z]{10}$" + } + }, + "com.amazonaws.bedrockagent#InferenceConfiguration": { + "type": "structure", + "members": { + "temperature": { + "target": "com.amazonaws.bedrockagent#Temperature", + "traits": { + "smithy.api#documentation": "

The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.

" + } + }, + "topP": { + "target": "com.amazonaws.bedrockagent#TopP", + "traits": { + "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for Top P determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topP to 0.8, the model only selects the next token from the top 80% of the probability distribution of next tokens.

" + } + }, + "topK": { + "target": "com.amazonaws.bedrockagent#TopK", + "traits": { + "smithy.api#documentation": "

While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for topK is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set topK to 50, the model selects the next token from among the top 50 most likely choices.

" + } + }, + "maximumLength": { + "target": "com.amazonaws.bedrockagent#MaximumLength", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to allow in the generated response.

" + } + }, + "stopSequences": { + "target": "com.amazonaws.bedrockagent#StopSequences", + "traits": { + "smithy.api#documentation": "

A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.
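A hedged Swift sketch of this shape; every member is optional, the values are illustrative, and the labels assume the alphabetical memberwise initializer that Soto normally generates.

import SotoBedrockAgent

let inference = BedrockAgent.InferenceConfiguration(
    maximumLength: 512,       // cap on generated tokens
    stopSequences: ["###"],   // stop generating when this sequence appears
    temperature: 0.2,         // lower values keep the model on higher-probability tokens
    topK: 50,                 // sample from the 50 most likely candidates
    topP: 0.8                 // sample from the top 80% of probability mass
)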

" + } + }, + "com.amazonaws.bedrockagent#IngestionJob": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is being added.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ingested data source.

", + "smithy.api#required": {} + } + }, + "ingestionJobId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the ingestion job.

" + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#IngestionJobStatus", + "traits": { + "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#required": {} + } + }, + "statistics": { + "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", + "traits": { + "smithy.api#documentation": "

Contains statistics about the ingestion job.

" + } + }, + "failureReasons": { + "target": "com.amazonaws.bedrockagent#FailureReasons", + "traits": { + "smithy.api#documentation": "

A list of reasons that the ingestion job failed.

" + } + }, + "startedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ingestion job started.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about an ingestion job, which converts a data source to embeddings for a vector store in a knowledge base.

This data type is used in the following API operations:
\n " + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilter": { + "type": "structure", + "members": { + "attribute": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilterAttribute", + "traits": { + "smithy.api#documentation": "

The attribute by which to filter the results.

", + "smithy.api#required": {} + } + }, + "operator": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilterOperator", + "traits": { + "smithy.api#documentation": "

The operation to carry out between the attribute and the values.

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilterValues", + "traits": { + "smithy.api#documentation": "

A list of values for the attribute.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a filter by which to filter the results.
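A filter therefore pairs a single attribute (currently only STATUS) with a single operator (currently only EQ) and up to ten values. The Swift struct below is a hand-written mirror of that shape, shown only to make the JSON layout concrete; the generated Soto types may use different names.

```swift
// Hand-written mirror of the IngestionJobFilter shape; the generated
// Soto/Bedrock types may differ in naming and optionality.
struct IngestionJobFilter: Codable {
    let attribute: String   // currently only "STATUS"
    let `operator`: String  // currently only "EQ"
    let values: [String]    // at most 10 values, each up to 100 characters
}

// Request only ingestion jobs that have finished successfully.
let completedOnly = IngestionJobFilter(attribute: "STATUS",
                                       `operator`: "EQ",
                                       values: ["COMPLETE"])
```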

" + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilterAttribute": { + "type": "enum", + "members": { + "STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STATUS" + } + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilterOperator": { + "type": "enum", + "members": { + "EQ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQ" + } + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilterValues": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilterValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockagent#GetIngestionJob" + }, + { + "target": "com.amazonaws.bedrockagent#ListIngestionJobs" + }, + { + "target": "com.amazonaws.bedrockagent#StartIngestionJob" + } + ] + }, + "com.amazonaws.bedrockagent#IngestionJobSortBy": { + "type": "structure", + "members": { + "attribute": { + "target": "com.amazonaws.bedrockagent#IngestionJobSortByAttribute", + "traits": { + "smithy.api#documentation": "

The attribute by which to sort the results.

", + "smithy.api#required": {} + } + }, + "order": { + "target": "com.amazonaws.bedrockagent#SortOrder", + "traits": { + "smithy.api#documentation": "

The order by which to sort the results.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Parameters by which to sort the results.

" + } + }, + "com.amazonaws.bedrockagent#IngestionJobSortByAttribute": { + "type": "enum", + "members": { + "STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STATUS" + } + }, + "STARTED_AT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTED_AT" + } + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobStatistics": { + "type": "structure", + "members": { + "numberOfDocumentsScanned": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The total number of source documents that were scanned. Includes new, updated, and unchanged documents.

" + } + }, + "numberOfMetadataDocumentsScanned": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The total number of metadata files that were scanned. Includes new, updated, and unchanged files.

" + } + }, + "numberOfNewDocumentsIndexed": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of new source documents in the data source that were successfully indexed.

" + } + }, + "numberOfModifiedDocumentsIndexed": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of modified source documents in the data source that were successfully indexed.

" + } + }, + "numberOfMetadataDocumentsModified": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of metadata files that were updated or deleted.

" + } + }, + "numberOfDocumentsDeleted": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of source documents that were deleted.

" + } + }, + "numberOfDocumentsFailed": { + "target": "smithy.api#PrimitiveLong", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of source documents that failed to be ingested.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the statistics for the ingestion job.

" + } + }, + "com.amazonaws.bedrockagent#IngestionJobStatus": { + "type": "enum", + "members": { + "STARTING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTING" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETE" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, + "com.amazonaws.bedrockagent#IngestionJobSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#IngestionJobSummary" + } + }, + "com.amazonaws.bedrockagent#IngestionJobSummary": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is added.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", + "smithy.api#required": {} + } + }, + "ingestionJobId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the ingestion job.

" + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#IngestionJobStatus", + "traits": { + "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#required": {} + } + }, + "startedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ingestion job was started.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", + "smithy.api#required": {} + } + }, + "statistics": { + "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", + "traits": { + "smithy.api#documentation": "

Contains statistics for the ingestion job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about an ingestion job.

" + } + }, + "com.amazonaws.bedrockagent#InputFlowNodeConfiguration": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Contains configurations for the input flow node for a flow. This node takes the input from flow invocation and passes it to the next node in the data type that you specify.

" + } + }, + "com.amazonaws.bedrockagent#Instruction": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 40, + "max": 4000 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#IntermediateStorage": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.bedrockagent#S3Location", + "traits": { + "smithy.api#documentation": "

An S3 bucket path.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A location for storing content from data sources temporarily as it is processed by custom components in the ingestion pipeline.

" + } + }, + "com.amazonaws.bedrockagent#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagent#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

An internal server error occurred. Retry your request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.bedrockagent#IteratorFlowNodeConfiguration": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Contains configurations for an iterator node in a flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.

The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.
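In data-flow terms, the iterator node fans the input array out one item at a time and the collector node fans the per-item responses back into a single output. The short Swift sketch below is only an analogy of that pattern; it does not use the generated flow-node types.

```swift
// Iterator/collector analogy: fan an array out item by item,
// process each item, then gather the per-item results into one response.
let inputs = ["doc-1", "doc-2", "doc-3"]

// Iterator node: each element is sent downstream on its own,
// together with the size of the array.
let iterated = inputs.map { item in (item: item, arraySize: inputs.count) }

// Downstream node: produce one response per item.
let responses = iterated.map { "processed \($0.item)" }

// Collector node: return a single combined response instead of one per item.
let collected = responses.joined(separator: ", ")
print(collected) // "processed doc-1, processed doc-2, processed doc-3"
```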

" + } + }, + "com.amazonaws.bedrockagent#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBase": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

The name of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the knowledge base.

" + } + }, + "roleArn": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseConfiguration": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the embeddings configuration of the knowledge base.

", + "smithy.api#required": {} + } + }, + "storageConfiguration": { + "target": "com.amazonaws.bedrockagent#StorageConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", + "traits": { + "smithy.api#documentation": "

The status of the knowledge base. The following statuses are possible:
  • CREATING – The knowledge base is being created.
  • ACTIVE – The knowledge base is ready to be queried.
  • DELETING – The knowledge base is being deleted.
  • UPDATING – The knowledge base is being updated.
  • FAILED – The knowledge base API operation failed.
", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the knowledge base was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", + "smithy.api#required": {} + } + }, + "failureReasons": { + "target": "com.amazonaws.bedrockagent#FailureReasons", + "traits": { + "smithy.api#documentation": "

A list of reasons that the API operation on the knowledge base failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a knowledge base.

" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 128 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:[0-9]{12}:knowledge-base/[0-9a-zA-Z]+$" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseType", + "traits": { + "smithy.api#documentation": "

The type of data that the data source is converted into for the knowledge base.

", + "smithy.api#required": {} + } + }, + "vectorKnowledgeBaseConfiguration": { + "target": "com.amazonaws.bedrockagent#VectorKnowledgeBaseConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the embeddings model that's used to convert the data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the embeddings configuration of the knowledge base.

" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseFlowNodeConfiguration": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base to query.

", + "smithy.api#required": {} + } + }, + "modelId": { + "target": "com.amazonaws.bedrockagent#ModelIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseId": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 10 + }, + "smithy.api#pattern": "^[0-9a-zA-Z]+$" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockagent#AssociateAgentKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#CreateKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#DeleteKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#GetAgentKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#GetKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBases" + }, + { + "target": "com.amazonaws.bedrockagent#ListKnowledgeBases" + }, + { + "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBase" + }, + { + "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBase" + } + ] + }, + "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseState": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "DELETE_UNSUCCESSFUL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_UNSUCCESSFUL" + } + } + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseStorageType": { + "type": "enum", + "members": { + "OPENSEARCH_SERVERLESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPENSEARCH_SERVERLESS" + } + }, + "PINECONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PINECONE" + } + }, + "REDIS_ENTERPRISE_CLOUD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REDIS_ENTERPRISE_CLOUD" + } + }, + "RDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RDS" + } + }, + "MONGO_DB_ATLAS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MONGO_DB_ATLAS" + } + } + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseSummary" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseSummary": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

The name of the knowledge base.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the knowledge base.

" + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseStatus", + "traits": { + "smithy.api#documentation": "

The status of the knowledge base.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about a knowledge base.

" + } + }, + "com.amazonaws.bedrockagent#KnowledgeBaseType": { + "type": "enum", + "members": { + "VECTOR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VECTOR" + } + } + } + }, + "com.amazonaws.bedrockagent#LambdaArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" + } + }, + "com.amazonaws.bedrockagent#LambdaFunctionFlowNodeConfiguration": { + "type": "structure", + "members": { + "lambdaArn": { + "target": "com.amazonaws.bedrockagent#LambdaArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lambda function to invoke.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a Lambda function node in the flow. You specify the Lambda function to invoke and the inputs into the function. The output is the response that is defined in the Lambda function. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, + "com.amazonaws.bedrockagent#LexBotAliasArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 78 + }, + "smithy.api#pattern": "^arn:aws(|-us-gov):lex:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:bot-alias/[0-9a-zA-Z]+/[0-9a-zA-Z]+$" + } + }, + "com.amazonaws.bedrockagent#LexBotLocaleId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#LexFlowNodeConfiguration": { + "type": "structure", + "members": { + "botAliasArn": { + "target": "com.amazonaws.bedrockagent#LexBotAliasArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.

", + "smithy.api#required": {} + } + }, + "localeId": { + "target": "com.amazonaws.bedrockagent#LexBotLocaleId", + "traits": { + "smithy.api#documentation": "

The Region to invoke the Amazon Lex bot in.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a Lex node in the flow. You specify an Amazon Lex bot to invoke. This node takes an utterance as the input and returns as the output the intent identified by the Amazon Lex bot. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, + "com.amazonaws.bedrockagent#ListAgentActionGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListAgentActionGroupsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListAgentActionGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the action groups for an agent and information about each one.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "actionGroupSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListAgentActionGroupsRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentActionGroupsResponse": { + "type": "structure", + "members": { + "actionGroupSummaries": { + "target": "com.amazonaws.bedrockagent#ActionGroupSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about an action group.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.
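Every List* operation in this model follows the same maxResults/nextToken contract, so callers typically loop until no nextToken is returned. The helper below is a hand-written Swift sketch of that loop; it takes an arbitrary page-fetching closure rather than referencing the generated Soto client, and only the (items, nextToken) response shape is taken from the model.

```swift
// Generic nextToken pagination: keep requesting pages, passing back the
// token from the previous response, until the service stops returning one.
func collectAllPages<Item>(
    fetchPage: (_ nextToken: String?) async throws -> (items: [Item], nextToken: String?)
) async rethrows -> [Item] {
    var all: [Item] = []
    var token: String? = nil
    repeat {
        let page = try await fetchPage(token)   // token is nil on the first request
        all.append(contentsOf: page.items)
        token = page.nextToken                  // nil once the last batch has been returned
    } while token != nil
    return all
}
```

A caller would wrap a generated List* call (for example, ListAgentActionGroups) in `fetchPage`, forwarding maxResults and the previous token on each iteration.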

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentAliases": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListAgentAliasesRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListAgentAliasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the aliases of an agent and information about each one.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/agents/{agentId}/agentaliases/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "agentAliasSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListAgentAliasesRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentAliasesResponse": { + "type": "structure", + "members": { + "agentAliasSummaries": { + "target": "com.amazonaws.bedrockagent#AgentAliasSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about an alias of the agent.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentKnowledgeBases": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists knowledge bases associated with an agent and information about each one.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "agentKnowledgeBaseSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent for which to return information about knowledge bases associated with it.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#Version", + "traits": { + "smithy.api#documentation": "

The version of the agent for which to return information about knowledge bases associated with it.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesResponse": { + "type": "structure", + "members": { + "agentKnowledgeBaseSummaries": { + "target": "com.amazonaws.bedrockagent#AgentKnowledgeBaseSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about a knowledge base associated with the agent.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListAgentVersionsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListAgentVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the versions of an agent and information about each version.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/agents/{agentId}/agentversions/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "agentVersionSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListAgentVersionsRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentVersionsResponse": { + "type": "structure", + "members": { + "agentVersionSummaries": { + "target": "com.amazonaws.bedrockagent#AgentVersionSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about a version of the agent.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListAgents": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListAgentsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListAgentsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the agents belonging to an account and information about each agent.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/agents/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "agentSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListAgentsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListAgentsResponse": { + "type": "structure", + "members": { + "agentSummaries": { + "target": "com.amazonaws.bedrockagent#AgentSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about an agent.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListDataSources": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListDataSourcesRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListDataSourcesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the data sources in a knowledge base and information about each one.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "dataSourceSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListDataSourcesRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base for which to return a list of information.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListDataSourcesResponse": { + "type": "structure", + "members": { + "dataSourceSummaries": { + "target": "com.amazonaws.bedrockagent#DataSourceSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about a data source.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListFlowAliases": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListFlowAliasesRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListFlowAliasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of aliases for a flow.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/{flowIdentifier}/aliases" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "flowAliasSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListFlowAliasesRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow for which aliases are being returned.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListFlowAliasesResponse": { + "type": "structure", + "members": { + "flowAliasSummaries": { + "target": "com.amazonaws.bedrockagent#FlowAliasSummaries", + "traits": { + "smithy.api#documentation": "

A list, each member of which contains information about a flow alias.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListFlowVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListFlowVersionsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListFlowVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of information about each version of a flow. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/{flowIdentifier}/versions" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "flowVersionSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListFlowVersionsRequest": { + "type": "structure", + "members": { + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListFlowVersionsResponse": { + "type": "structure", + "members": { + "flowVersionSummaries": { + "target": "com.amazonaws.bedrockagent#FlowVersionSummaries", + "traits": { + "smithy.api#documentation": "

A list, each member of which contains information about a flow.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListFlows": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListFlowsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListFlowsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of flows and information about each flow. For more information, see Manage a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/flows/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "flowSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListFlowsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListFlowsResponse": { + "type": "structure", + "members": { + "flowSummaries": { + "target": "com.amazonaws.bedrockagent#FlowSummaries", + "traits": { + "smithy.api#documentation": "

A list, each member of which contains information about a flow.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListIngestionJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListIngestionJobsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListIngestionJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the ingestion jobs for a data source and information about each of them.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "ingestionJobSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListIngestionJobsRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base for which to return ingestion jobs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source for which to return ingestion jobs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "filters": { + "target": "com.amazonaws.bedrockagent#IngestionJobFilters", + "traits": { + "smithy.api#documentation": "

Contains a definition of a filter for which to filter the results.

" + } + }, + "sortBy": { + "target": "com.amazonaws.bedrockagent#IngestionJobSortBy", + "traits": { + "smithy.api#documentation": "

Contains details about how to sort the results.

" + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListIngestionJobsResponse": { + "type": "structure", + "members": { + "ingestionJobSummaries": { + "target": "com.amazonaws.bedrockagent#IngestionJobSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about an ingestion job.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListKnowledgeBases": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListKnowledgeBasesRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListKnowledgeBasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the knowledge bases in an account and information about each of them.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/knowledgebases/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "knowledgeBaseSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListKnowledgeBasesRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListKnowledgeBasesResponse": { + "type": "structure", + "members": { + "knowledgeBaseSummaries": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects, each of which contains information about a knowledge base.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListPrompts": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListPromptsRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListPromptsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of prompts from the Prompt management tool and information about each prompt. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/prompts/" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "promptSummaries" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#ListPromptsRequest": { + "type": "structure", + "members": { + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", + "smithy.api#httpQuery": "promptIdentifier" + } + }, + "maxResults": { + "target": "com.amazonaws.bedrockagent#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListPromptsResponse": { + "type": "structure", + "members": { + "promptSummaries": { + "target": "com.amazonaws.bedrockagent#PromptSummaries", + "traits": { + "smithy.api#documentation": "

A list, each member of which contains information about a prompt using Prompt management.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.bedrockagent#NextToken", + "traits": { + "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List all the tags for the resource you specify.
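A similarly hedged sketch: given a configured BedrockAgent service object as in the earlier example, listing tags is a single call keyed by the resource ARN (agentArn is a placeholder value).

    // tags is optional in the response, so default to an empty map.
    let tagsResponse = try await bedrockAgent.listTagsForResource(.init(resourceArn: agentArn))
    for (key, value) in tagsResponse.tags ?? [:] {
        print("\(key) = \(value)")
    }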

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.bedrockagent#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.bedrockagent#TaggableResourcesArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which to list tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", + "traits": { + "smithy.api#documentation": "

The key-value pairs for the tags associated with the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.bedrockagent#MaximumLength": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 4096 + } + } + }, + "com.amazonaws.bedrockagent#MemoryConfiguration": { + "type": "structure", + "members": { + "enabledMemoryTypes": { + "target": "com.amazonaws.bedrockagent#EnabledMemoryTypes", + "traits": { + "smithy.api#documentation": "

The type of memory that is stored.

", + "smithy.api#required": {} + } + }, + "storageDays": { + "target": "com.amazonaws.bedrockagent#StorageDays", + "traits": { + "smithy.api#default": 30, + "smithy.api#documentation": "

The number of days the agent is configured to retain the conversational context.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of the memory configuration.
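As a sketch, building this structure with the generated Swift types might look as follows (the .sessionSummary case spelling assumes Soto's usual conversion of the SESSION_SUMMARY enum value):

    // Retain session summaries for the documented default of 30 days.
    let memoryConfiguration = MemoryConfiguration(
        enabledMemoryTypes: [.sessionSummary],
        storageDays: 30
    )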

" + } + }, + "com.amazonaws.bedrockagent#MemoryType": { + "type": "enum", + "members": { + "SESSION_SUMMARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SESSION_SUMMARY" + } + } + } + }, + "com.amazonaws.bedrockagent#Microsoft365TenantId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.bedrockagent#ModelIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasCollectionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 63 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasConfiguration": { + "type": "structure", + "members": { + "endpoint": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasEndpoint", + "traits": { + "smithy.api#documentation": "

The endpoint URL of your MongoDB Atlas cluster for your knowledge base.

", + "smithy.api#required": {} + } + }, + "databaseName": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasDatabaseName", + "traits": { + "smithy.api#documentation": "

The database name in your MongoDB Atlas cluster for your knowledge base.

", + "smithy.api#required": {} + } + }, + "collectionName": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasCollectionName", + "traits": { + "smithy.api#documentation": "

The collection name of the knowledge base in MongoDB Atlas.

", + "smithy.api#required": {} + } + }, + "vectorIndexName": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasIndexName", + "traits": { + "smithy.api#documentation": "

The name of the MongoDB Atlas vector search index.

", + "smithy.api#required": {} + } + }, + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that contains user credentials for your MongoDB Atlas cluster.

", + "smithy.api#required": {} + } + }, + "fieldMapping": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasFieldMapping", + "traits": { + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", + "smithy.api#required": {} + } + }, + "endpointServiceName": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasEndpointServiceName", + "traits": { + "smithy.api#documentation": "

The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in MongoDB Atlas.
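For illustration, a hedged sketch of assembling this configuration in Swift; all values are placeholders and the initializer follows Soto's alphabetical member ordering.

    let mongoDbAtlasConfiguration = MongoDbAtlasConfiguration(
        collectionName: "kb-collection",
        credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:mongo-atlas-credentials",
        databaseName: "knowledge-base",
        endpoint: "my-cluster.mongodb.net",
        fieldMapping: MongoDbAtlasFieldMapping(
            metadataField: "metadata",   // where Amazon Bedrock writes vector store metadata
            textField: "text",           // where the chunked raw text is stored
            vectorField: "embedding"     // where the vector embeddings are stored
        ),
        vectorIndexName: "vector-index"
        // endpointServiceName is optional and omitted here.
    )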

" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasDatabaseName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 63 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasEndpointServiceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^(?:arn:aws(?:-us-gov|-cn|-iso|-iso-[a-z])*:.+:.*:\\d+:.+/.+$|[a-zA-Z0-9*]+[a-zA-Z0-9._-]*)$" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasFieldMapping": { + "type": "structure", + "members": { + "vectorField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.

", + "smithy.api#required": {} + } + }, + "textField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", + "smithy.api#required": {} + } + }, + "metadataField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" + } + }, + "com.amazonaws.bedrockagent#MongoDbAtlasIndexName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#Name": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" + } + }, + "com.amazonaws.bedrockagent#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^\\S*$" + } + }, + "com.amazonaws.bedrockagent#NonBlankString": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]+$" + } + }, + "com.amazonaws.bedrockagent#NumericalVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9]{1,5}$" + } + }, + "com.amazonaws.bedrockagent#OpenSearchServerlessCollectionArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws:aoss:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:collection/[a-z0-9-]{3,32}$" + } + }, + "com.amazonaws.bedrockagent#OpenSearchServerlessConfiguration": { + "type": "structure", + "members": { + "collectionArn": { + "target": "com.amazonaws.bedrockagent#OpenSearchServerlessCollectionArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the OpenSearch Service vector store.

", + "smithy.api#required": {} + } + }, + "vectorIndexName": { + "target": "com.amazonaws.bedrockagent#OpenSearchServerlessIndexName", + "traits": { + "smithy.api#documentation": "

The name of the vector store.

", + "smithy.api#required": {} + } + }, + "fieldMapping": { + "target": "com.amazonaws.bedrockagent#OpenSearchServerlessFieldMapping", + "traits": { + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon OpenSearch Service. For more information, see Create a vector index in Amazon OpenSearch Service.

" + } + }, + "com.amazonaws.bedrockagent#OpenSearchServerlessFieldMapping": { + "type": "structure", + "members": { + "vectorField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.

", + "smithy.api#required": {} + } + }, + "textField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", + "smithy.api#required": {} + } + }, + "metadataField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" + } + }, + "com.amazonaws.bedrockagent#OpenSearchServerlessIndexName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#OutputFlowNodeConfiguration": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Contains configurations for an output flow node in the flow. You specify the data type expected for the input into the node in the type field and how to return the final output in the expression field.

" + } + }, + "com.amazonaws.bedrockagent#ParameterDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.bedrockagent#ParameterDetail": { + "type": "structure", + "members": { + "description": { + "target": "com.amazonaws.bedrockagent#ParameterDescription", + "traits": { + "smithy.api#documentation": "

A description of the parameter. Helps the foundation model determine how to elicit the parameters from the user.

" + } + }, + "type": { + "target": "com.amazonaws.bedrockagent#Type", + "traits": { + "smithy.api#documentation": "

The data type of the parameter.

", + "smithy.api#required": {} + } + }, + "required": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Whether the parameter is required for the agent to complete the function for action group invocation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about a parameter in a function for an action group.

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagent#ParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.bedrockagent#Name" + }, + "value": { + "target": "com.amazonaws.bedrockagent#ParameterDetail" + } + }, + "com.amazonaws.bedrockagent#ParsingConfiguration": { + "type": "structure", + "members": { + "parsingStrategy": { + "target": "com.amazonaws.bedrockagent#ParsingStrategy", + "traits": { + "smithy.api#documentation": "

The parsing strategy for the data source.

", + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "bedrockFoundationModelConfiguration": { + "target": "com.amazonaws.bedrockagent#BedrockFoundationModelConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for a foundation model used to parse documents for a data source.

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Lists the action groups for an agent and information about each one.

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "actionGroupSummaries" - }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#documentation": "

Settings for parsing document contents. By default, the service converts the contents of each document into text before splitting it into chunks. To improve processing of PDF files with tables and images, you can configure the data source to convert the pages of text into images and use a model to describe the contents of each page.

To use a model to parse PDF documents, set the parsing strategy to BEDROCK_FOUNDATION_MODEL and specify the model to use by ARN. You can also override the default parsing prompt with instructions for how to interpret images and tables in your documents. The following models are supported.

  • Anthropic Claude 3 Sonnet - anthropic.claude-3-sonnet-20240229-v1:0
  • Anthropic Claude 3 Haiku - anthropic.claude-3-haiku-20240307-v1:0

You can get the ARN of a model with the action. Standard model usage charges apply for the foundation model parsing strategy.
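A hedged sketch of the foundation-model parsing strategy in Swift. BedrockFoundationModelConfiguration and its members (modelArn, parsingPrompt) are defined outside this excerpt, so their spellings here are assumptions; the model ARN is one of the two supported models listed above.

    // Parse PDF pages with a model instead of plain text extraction, using a
    // custom parsing prompt for tables and images.
    let parsingConfiguration = ParsingConfiguration(
        bedrockFoundationModelConfiguration: BedrockFoundationModelConfiguration(
            modelArn: "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-haiku-20240307-v1:0",
            parsingPrompt: ParsingPrompt(parsingPromptText: "Describe every table and image on this page.")
        ),
        parsingStrategy: .bedrockFoundationModel
    )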

" } }, - "com.amazonaws.bedrockagent#ListAgentActionGroupsRequest": { + "com.amazonaws.bedrockagent#ParsingPrompt": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "parsingPromptText": { + "target": "com.amazonaws.bedrockagent#ParsingPromptText", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

", "smithy.api#required": {} } - }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#Version", + } + }, + "traits": { + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

" + } + }, + "com.amazonaws.bedrockagent#ParsingPromptText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10000 + } + } + }, + "com.amazonaws.bedrockagent#ParsingStrategy": { + "type": "enum", + "members": { + "BEDROCK_FOUNDATION_MODEL": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The version of the agent.

", - "smithy.api#httpLabel": {}, + "smithy.api#enumValue": "BEDROCK_FOUNDATION_MODEL" + } + } + } + }, + "com.amazonaws.bedrockagent#PatternObjectFilter": { + "type": "structure", + "members": { + "objectType": { + "target": "com.amazonaws.bedrockagent#FilteredObjectType", + "traits": { + "smithy.api#documentation": "

The supported object type or content type of the data source.

", "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "inclusionFilters": { + "target": "com.amazonaws.bedrockagent#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#documentation": "

A list of one or more inclusion regular expression patterns to include certain \n object types that adhere to the pattern. If you specify an inclusion and exclusion \n filter/pattern and both match a document, the exclusion filter takes precedence \n and the document isn’t crawled.

" } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "exclusionFilters": { + "target": "com.amazonaws.bedrockagent#FilterList", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#documentation": "

A list of one or more exclusion regular expression patterns to exclude certain \n object types that adhere to the pattern. If you specify an inclusion and exclusion \n filter/pattern and both match a document, the exclusion filter takes precedence \n and the document isn’t crawled.

" } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

The specific filters applied to your data source content. You can filter out or \n include certain content.
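As a sketch, a filter that crawls only PDF attachments while skipping drafts might look like this in Swift. FilteredObjectType and FilterList are defined outside this excerpt, so plain strings are assumed here, and "Attachment" is an illustrative object type.

    // If a document matches both lists, the exclusion filter wins, as documented above.
    let attachmentFilter = PatternObjectFilter(
        exclusionFilters: [".*draft.*"],
        inclusionFilters: [".*\\.pdf$"],
        objectType: "Attachment"
    )
    let filterConfiguration = PatternObjectFilterConfiguration(filters: [attachmentFilter])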

" } }, - "com.amazonaws.bedrockagent#ListAgentActionGroupsResponse": { + "com.amazonaws.bedrockagent#PatternObjectFilterConfiguration": { "type": "structure", "members": { - "actionGroupSummaries": { - "target": "com.amazonaws.bedrockagent#ActionGroupSummaries", + "filters": { + "target": "com.amazonaws.bedrockagent#PatternObjectFilterList", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about an action group.

", + "smithy.api#documentation": "

The configuration of specific filters applied to your data source content. You can \n filter out or include certain content.

", "smithy.api#required": {} } - }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", - "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" - } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

The configuration of filtering certain objects or content types of the data source.

" } }, - "com.amazonaws.bedrockagent#ListAgentAliases": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListAgentAliasesRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListAgentAliasesResponse" + "com.amazonaws.bedrockagent#PatternObjectFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#PatternObjectFilter" }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], "traits": { - "smithy.api#documentation": "

Lists the aliases of an agent and information about each one.

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/agents/{agentId}/agentaliases/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "agentAliasSummaries" + "smithy.api#length": { + "min": 1, + "max": 25 }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#ListAgentAliasesRequest": { + "com.amazonaws.bedrockagent#Payload": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#PineconeConfiguration": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "connectionString": { + "target": "com.amazonaws.bedrockagent#PineconeConnectionString", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The endpoint URL for your index management page.

", "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Pinecone API key.

", + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "namespace": { + "target": "com.amazonaws.bedrockagent#PineconeNamespace", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#documentation": "

The namespace to be used to write new data to your database.

" + } + }, + "fieldMapping": { + "target": "com.amazonaws.bedrockagent#PineconeFieldMapping", + "traits": { + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Pinecone. For more information, see Create a vector index in Pinecone.

" } }, - "com.amazonaws.bedrockagent#ListAgentAliasesResponse": { + "com.amazonaws.bedrockagent#PineconeConnectionString": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#PineconeFieldMapping": { "type": "structure", "members": { - "agentAliasSummaries": { - "target": "com.amazonaws.bedrockagent#AgentAliasSummaries", + "textField": { + "target": "com.amazonaws.bedrockagent#FieldName", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about an alias of the agent.

", + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "metadataField": { + "target": "com.amazonaws.bedrockagent#FieldName", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" } }, - "com.amazonaws.bedrockagent#ListAgentKnowledgeBases": { + "com.amazonaws.bedrockagent#PineconeNamespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#PrepareAgent": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesRequest" + "target": "com.amazonaws.bedrockagent#PrepareAgentRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesResponse" + "target": "com.amazonaws.bedrockagent#PrepareAgentResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -5343,74 +10150,62 @@ } ], "traits": { - "smithy.api#documentation": "

Lists knowledge bases associated with an agent and information about each one.

", + "smithy.api#documentation": "

Creates a DRAFT version of the agent that can be used for internal testing.
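A minimal sketch of this call with the generated client (same assumptions as the earlier examples; agentId is a placeholder):

    // Prepare the working draft and report the status returned by the service.
    let prepared = try await bedrockAgent.prepareAgent(.init(agentId: agentId))
    print("Agent \(prepared.agentId) version \(prepared.agentVersion) is \(prepared.agentStatus)")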

", "smithy.api#http": { - "code": 200, + "code": 202, "method": "POST", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "agentKnowledgeBaseSummaries" + "uri": "/agents/{agentId}/" }, - "smithy.api#readonly": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesRequest": { + "com.amazonaws.bedrockagent#PrepareAgentRequest": { "type": "structure", "members": { "agentId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent for which to return information about knowledge bases associated with it.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#Version", - "traits": { - "smithy.api#documentation": "

The version of the agent for which to return information about knowledge bases associated with it.

", + "smithy.api#documentation": "

The unique identifier of the agent for which to create a DRAFT version.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" - } - }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", - "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" - } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#ListAgentKnowledgeBasesResponse": { + "com.amazonaws.bedrockagent#PrepareAgentResponse": { "type": "structure", "members": { - "agentKnowledgeBaseSummaries": { - "target": "com.amazonaws.bedrockagent#AgentKnowledgeBaseSummaries", + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent for which the DRAFT version was created.

", + "smithy.api#required": {} + } + }, + "agentStatus": { + "target": "com.amazonaws.bedrockagent#AgentStatus", + "traits": { + "smithy.api#documentation": "

The status of the DRAFT version and whether it is ready for use.

", + "smithy.api#required": {} + } + }, + "agentVersion": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about a knowledge base associated with the agent.

", + "smithy.api#documentation": "

The version of the agent.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "preparedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

The time at which the DRAFT version of the agent was last prepared.

", + "smithy.api#required": {} } } }, @@ -5418,24 +10213,30 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#ListAgentVersions": { + "com.amazonaws.bedrockagent#PrepareFlow": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#ListAgentVersionsRequest" + "target": "com.amazonaws.bedrockagent#PrepareFlowRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#ListAgentVersionsResponse" + "target": "com.amazonaws.bedrockagent#PrepareFlowResponse" }, "errors": [ { "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, { "target": "com.amazonaws.bedrockagent#InternalServerException" }, { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -5444,66 +10245,48 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the versions of an agent and information about each version.

", + "smithy.api#documentation": "

Prepares the DRAFT version of a flow so that it can be invoked. For more information, see Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", "smithy.api#http": { - "code": 200, + "code": 202, "method": "POST", - "uri": "/agents/{agentId}/agentversions/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "agentVersionSummaries" + "uri": "/flows/{flowIdentifier}/" }, - "smithy.api#readonly": {}, "smithy.api#tags": [ "console" ] } }, - "com.amazonaws.bedrockagent#ListAgentVersionsRequest": { + "com.amazonaws.bedrockagent#PrepareFlowRequest": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" - } - }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", - "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" - } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#ListAgentVersionsResponse": { + "com.amazonaws.bedrockagent#PrepareFlowResponse": { "type": "structure", "members": { - "agentVersionSummaries": { - "target": "com.amazonaws.bedrockagent#AgentVersionSummaries", + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about a version of the agent.

", + "smithy.api#documentation": "

The unique identifier of the flow.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

The status of the flow. When you submit this request, the status will be NotPrepared. If preparation succeeds, the status becomes Prepared. If it fails, the status becomes FAILED.

", + "smithy.api#required": {} } } }, @@ -5511,581 +10294,619 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#ListAgents": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListAgentsRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListAgentsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" + "com.amazonaws.bedrockagent#PromptArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?)$" + } + }, + "com.amazonaws.bedrockagent#PromptConfiguration": { + "type": "structure", + "members": { + "promptType": { + "target": "com.amazonaws.bedrockagent#PromptType", + "traits": { + "smithy.api#documentation": "

The step in the agent sequence that this prompt configuration applies to.

" + } }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" + "promptCreationMode": { + "target": "com.amazonaws.bedrockagent#CreationMode", + "traits": { + "smithy.api#documentation": "

Specifies whether to override the default prompt template for this promptType. Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template.

" + } }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "promptState": { + "target": "com.amazonaws.bedrockagent#PromptState", + "traits": { + "smithy.api#documentation": "

Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

  • PRE_PROCESSING – ENABLED
  • ORCHESTRATION – ENABLED
  • KNOWLEDGE_BASE_RESPONSE_GENERATION – ENABLED
  • POST_PROCESSING – DISABLED
" + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

Lists the agents belonging to an account and information about each agent.

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/agents/" + "basePromptTemplate": { + "target": "com.amazonaws.bedrockagent#BasePromptTemplate", + "traits": { + "smithy.api#documentation": "

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. For more information, see Configure the prompt templates.

" + } }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "agentSummaries" + "inferenceConfiguration": { + "target": "com.amazonaws.bedrockagent#InferenceConfiguration", + "traits": { + "smithy.api#documentation": "

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.

" + } }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "parserMode": { + "target": "com.amazonaws.bedrockagent#CreationMode", + "traits": { + "smithy.api#documentation": "

Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.
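For illustration, a hedged sketch of overriding only the orchestration prompt. CreationMode and BasePromptTemplate are defined outside this excerpt, so the .overridden spelling and the template variable are assumptions; orchestrationTemplate is a placeholder string.

    // Override the orchestration step only; the other steps keep their default templates.
    let promptOverrideConfiguration = PromptOverrideConfiguration(
        promptConfigurations: [
            PromptConfiguration(
                basePromptTemplate: orchestrationTemplate,
                promptCreationMode: .overridden,
                promptState: .enabled,
                promptType: .orchestration
            )
        ]
    )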

" } }, - "com.amazonaws.bedrockagent#ListAgentsRequest": { + "com.amazonaws.bedrockagent#PromptConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#PromptConfiguration" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#PromptDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.bedrockagent#PromptFlowNodeConfiguration": { "type": "structure", "members": { - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "sourceConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptFlowNodeSourceConfiguration", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#documentation": "

Specifies whether the prompt is from Prompt management or defined inline.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a prompt node in the flow. You can use a prompt from Prompt management or you can define one in this node. If the prompt contains variables, the inputs into this node will fill in the variables. The output from this node is the response generated by the model. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

" + } + }, + "com.amazonaws.bedrockagent#PromptFlowNodeInlineConfiguration": { + "type": "structure", + "members": { + "templateType": { + "target": "com.amazonaws.bedrockagent#PromptTemplateType", + "traits": { + "smithy.api#documentation": "

The type of prompt template.

", + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "templateConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptTemplateConfiguration", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#documentation": "

Contains a prompt and variables in the prompt that can be replaced with values at runtime.

", + "smithy.api#required": {} + } + }, + "modelId": { + "target": "com.amazonaws.bedrockagent#PromptModelIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the model to run inference with.

", + "smithy.api#required": {} + } + }, + "inferenceConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptInferenceConfiguration", + "traits": { + "smithy.api#documentation": "

Contains inference configurations for the prompt.

" } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Contains configurations for a prompt defined inline in the node.

" } }, - "com.amazonaws.bedrockagent#ListAgentsResponse": { + "com.amazonaws.bedrockagent#PromptFlowNodeResourceConfiguration": { "type": "structure", "members": { - "agentSummaries": { - "target": "com.amazonaws.bedrockagent#AgentSummaries", + "promptArn": { + "target": "com.amazonaws.bedrockagent#PromptArn", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about an agent.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the prompt from Prompt management.

", "smithy.api#required": {} } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a prompt from Prompt management to use in a node.

" + } + }, + "com.amazonaws.bedrockagent#PromptFlowNodeSourceConfiguration": { + "type": "union", + "members": { + "resource": { + "target": "com.amazonaws.bedrockagent#PromptFlowNodeResourceConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for a prompt from Prompt management.

" + } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "inline": { + "target": "com.amazonaws.bedrockagent#PromptFlowNodeInlineConfiguration", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

Contains configurations for a prompt that is defined inline.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Contains configurations for a prompt and whether it is from Prompt management or defined inline.
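Smithy unions such as this one are assumed to surface in Soto as Swift enums with associated values; a sketch of pointing a prompt node at a prompt stored in Prompt management (the ARN is a placeholder matching the PromptArn pattern defined earlier):

    // Use a managed prompt; the .inline case would instead embed the template here.
    let promptSource: PromptFlowNodeSourceConfiguration = .resource(
        PromptFlowNodeResourceConfiguration(promptArn: "arn:aws:bedrock:us-east-1:111122223333:prompt/PROMPT0001")
    )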

" } }, - "com.amazonaws.bedrockagent#ListDataSources": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListDataSourcesRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListDataSourcesResponse" + "com.amazonaws.bedrockagent#PromptId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-zA-Z]{10}$" + } + }, + "com.amazonaws.bedrockagent#PromptIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9a-zA-Z]{10})|(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10})(?::[0-9]{1,5})?$" + } + }, + "com.amazonaws.bedrockagent#PromptInferenceConfiguration": { + "type": "union", + "members": { + "text": { + "target": "com.amazonaws.bedrockagent#PromptModelInferenceConfiguration", + "traits": { + "smithy.api#documentation": "

Contains inference configurations for a text prompt.

" + } + } }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "traits": { + "smithy.api#documentation": "

Contains inference configurations for the prompt.

" + } + }, + "com.amazonaws.bedrockagent#PromptInputVariable": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#PromptInputVariableName", + "traits": { + "smithy.api#documentation": "

The name of the variable.

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Lists the data sources in a knowledge base and information about each one.

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/" + "smithy.api#documentation": "

Contains information about a variable in the prompt.

" + } + }, + "com.amazonaws.bedrockagent#PromptInputVariableName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" + } + }, + "com.amazonaws.bedrockagent#PromptInputVariablesList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#PromptInputVariable" + }, + "traits": { + "smithy.api#length": { + "max": 5 }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "dataSourceSummaries" + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#PromptModelIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" } }, - "com.amazonaws.bedrockagent#ListDataSourcesRequest": { + "com.amazonaws.bedrockagent#PromptModelInferenceConfiguration": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "temperature": { + "target": "com.amazonaws.bedrockagent#Temperature", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which to return a list of information.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.

" } }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "topP": { + "target": "com.amazonaws.bedrockagent#TopP", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#documentation": "

The percentage of most-likely candidates that the model considers for the next token.

" } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "topK": { + "target": "com.amazonaws.bedrockagent#TopK", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#documentation": "

The number of most-likely candidates that the model considers for the next token during generation.

" + } + }, + "maxTokens": { + "target": "com.amazonaws.bedrockagent#MaximumLength", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to return in the response.

" + } + }, + "stopSequences": { + "target": "com.amazonaws.bedrockagent#StopSequences", + "traits": { + "smithy.api#documentation": "

A list of strings that define sequences after which the model will stop generating.

" } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Contains inference configurations related to model inference for a prompt. For more information, see Inference parameters.
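A short sketch of these parameters in Swift, wrapped in the PromptInferenceConfiguration union described above (the numeric types behind temperature and topP are defined outside this excerpt, so plain literals are used):

    // Fairly deterministic sampling, capped at 512 output tokens.
    let inferenceConfiguration: PromptInferenceConfiguration = .text(
        PromptModelInferenceConfiguration(
            maxTokens: 512,
            stopSequences: ["\n\nHuman:"],
            temperature: 0.2,
            topP: 0.9
        )
    )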

" } }, - "com.amazonaws.bedrockagent#ListDataSourcesResponse": { + "com.amazonaws.bedrockagent#PromptName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" + } + }, + "com.amazonaws.bedrockagent#PromptOverrideConfiguration": { "type": "structure", "members": { - "dataSourceSummaries": { - "target": "com.amazonaws.bedrockagent#DataSourceSummaries", + "promptConfigurations": { + "target": "com.amazonaws.bedrockagent#PromptConfigurations", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about a data source.

", + "smithy.api#documentation": "

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "overrideLambda": { + "target": "com.amazonaws.bedrockagent#LambdaArn", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#ListIngestionJobs": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListIngestionJobsRequest" + "com.amazonaws.bedrockagent#PromptResource": { + "type": "resource", + "identifiers": { + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier" + } }, - "output": { - "target": "com.amazonaws.bedrockagent#ListIngestionJobsResponse" + "create": { + "target": "com.amazonaws.bedrockagent#CreatePrompt" }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, + "read": { + "target": "com.amazonaws.bedrockagent#GetPrompt" + }, + "update": { + "target": "com.amazonaws.bedrockagent#UpdatePrompt" + }, + "delete": { + "target": "com.amazonaws.bedrockagent#DeletePrompt" + }, + "list": { + "target": "com.amazonaws.bedrockagent#ListPrompts" + }, + "operations": [ { - "target": "com.amazonaws.bedrockagent#ValidationException" + "target": "com.amazonaws.bedrockagent#CreatePromptVersion" } ], "traits": { - "smithy.api#documentation": "

Lists the ingestion jobs for a data source and information about each of them.

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "ingestionJobSummaries" + "aws.cloudformation#cfnResource": { + "name": "Prompt" + } + } + }, + "com.amazonaws.bedrockagent#PromptState": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } } }, - "com.amazonaws.bedrockagent#ListIngestionJobsRequest": { + "com.amazonaws.bedrockagent#PromptSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#PromptSummary" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.bedrockagent#PromptSummary": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which to return ingestion jobs.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The name of the prompt.

", "smithy.api#required": {} } }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source for which to return ingestion jobs.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The description of the prompt.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", "smithy.api#required": {} } }, - "filters": { - "target": "com.amazonaws.bedrockagent#IngestionJobFilters", + "arn": { + "target": "com.amazonaws.bedrockagent#PromptArn", "traits": { - "smithy.api#documentation": "

Contains a definition of a filter for which to filter the results.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the prompt.

", + "smithy.api#required": {} } }, - "sortBy": { - "target": "com.amazonaws.bedrockagent#IngestionJobSortBy", + "version": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#documentation": "

Contains details about how to sort the results.

" + "smithy.api#documentation": "

The version of the prompt that this summary applies to.

", + "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#documentation": "

The time at which the prompt was created.

", + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#documentation": "

The time at which the prompt was last updated.

", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Contains information about a prompt in your Prompt management tool.

This data type is used in the following API operations:

\n " } }, - "com.amazonaws.bedrockagent#ListIngestionJobsResponse": { - "type": "structure", + "com.amazonaws.bedrockagent#PromptTemplateConfiguration": { + "type": "union", "members": { - "ingestionJobSummaries": { - "target": "com.amazonaws.bedrockagent#IngestionJobSummaries", - "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about an ingestion job.

", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "text": { + "target": "com.amazonaws.bedrockagent#TextPromptTemplateConfiguration", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

Contains configurations for the text in a message for a prompt.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Contains the message for a prompt. For more information, see Prompt management in Amazon Bedrock.

" } }, - "com.amazonaws.bedrockagent#ListKnowledgeBases": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListKnowledgeBasesRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListKnowledgeBasesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "com.amazonaws.bedrockagent#PromptTemplateType": { + "type": "enum", + "members": { + "TEXT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TEXT" + } } - ], - "traits": { - "smithy.api#documentation": "

Lists the knowledge bases in an account and information about each of them.
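For reference, a hedged Soto usage sketch: operations carrying `smithy.api#paginated` normally get an async paginator in the generated module, assumed here to be `listKnowledgeBasesPaginator` in `SotoBedrockAgent` (check the generated source for the exact name and initializer order).

```swift
import SotoBedrockAgent

// Sketch: page through every knowledge base in the account. The paginator
// follows the nextToken/maxResults scheme declared in the paginated trait.
func printAllKnowledgeBases(_ bedrockAgent: BedrockAgent) async throws {
    let request = BedrockAgent.ListKnowledgeBasesRequest(maxResults: 100)
    for try await page in bedrockAgent.listKnowledgeBasesPaginator(request) {
        for summary in page.knowledgeBaseSummaries {
            print(summary)
        }
    }
}
```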

", - "smithy.api#http": { - "code": 200, - "method": "POST", - "uri": "/knowledgebases/" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "knowledgeBaseSummaries" - }, - "smithy.api#readonly": {}, - "smithy.api#tags": [ - "console" - ] } }, - "com.amazonaws.bedrockagent#ListKnowledgeBasesRequest": { - "type": "structure", + "com.amazonaws.bedrockagent#PromptType": { + "type": "enum", "members": { - "maxResults": { - "target": "com.amazonaws.bedrockagent#MaxResults", + "PRE_PROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRE_PROCESSING" + } + }, + "ORCHESTRATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ORCHESTRATION" + } + }, + "POST_PROCESSING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.

" + "smithy.api#enumValue": "POST_PROCESSING" } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "KNOWLEDGE_BASE_RESPONSE_GENERATION": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

" + "smithy.api#enumValue": "KNOWLEDGE_BASE_RESPONSE_GENERATION" } } - }, - "traits": { - "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#ListKnowledgeBasesResponse": { + "com.amazonaws.bedrockagent#PromptVariant": { "type": "structure", "members": { - "knowledgeBaseSummaries": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseSummaries", + "name": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about a knowledge base.

", + "smithy.api#documentation": "

The name of the prompt variant.

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.bedrockagent#NextToken", + "templateType": { + "target": "com.amazonaws.bedrockagent#PromptTemplateType", "traits": { - "smithy.api#documentation": "

If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results.

" + "smithy.api#documentation": "

The type of prompt template to use.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.bedrockagent#ListTagsForResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#ListTagsForResourceRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#ListTagsForResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

List all the tags for the resource you specify.

", - "smithy.api#http": { - "code": 200, - "method": "GET", - "uri": "/tags/{resourceArn}" + "templateConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptTemplateConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations for the prompt template.

" + } }, - "smithy.api#readonly": {} - } - }, - "com.amazonaws.bedrockagent#ListTagsForResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.bedrockagent#TaggableResourcesArn", + "modelId": { + "target": "com.amazonaws.bedrockagent#PromptModelIdentifier", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which to list tags.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

The unique identifier of the model with which to run inference on the prompt.

" } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#ListTagsForResourceResponse": { - "type": "structure", - "members": { - "tags": { - "target": "com.amazonaws.bedrockagent#TagsMap", + }, + "inferenceConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptInferenceConfiguration", "traits": { - "smithy.api#documentation": "

The key-value pairs for the tags associated with the resource.

" + "smithy.api#documentation": "

Contains inference configurations for the prompt variant.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Contains details about a variant of the prompt.
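A hedged sketch of building this shape from Swift, assuming Soto renders the `PromptTemplateConfiguration` union as an enum with a `text` case and camel-cases the `TEXT` enum value; the model identifier and template text are placeholders.

```swift
import SotoBedrockAgent

// Sketch: one prompt variant using the TEXT template type. Placeholder model
// identifier and template; {{...}} marks prompt input variables.
let variant = BedrockAgent.PromptVariant(
    modelId: "anthropic.claude-3-sonnet-20240229-v1:0",
    name: "draft-variant",
    templateConfiguration: .text(.init(text: "Write a {{genre}} story about {{topic}}.")),
    templateType: .text
)
```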

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#MaxResults": { - "type": "integer", + "com.amazonaws.bedrockagent#PromptVariantList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#PromptVariant" + }, "traits": { - "smithy.api#range": { - "min": 1, - "max": 1000 - } + "smithy.api#length": { + "max": 3 + }, + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#MaximumLength": { - "type": "integer", + "com.amazonaws.bedrockagent#PromptVariantName": { + "type": "string", "traits": { - "smithy.api#range": { - "min": 0, - "max": 4096 - } + "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" } }, - "com.amazonaws.bedrockagent#ModelIdentifier": { + "com.amazonaws.bedrockagent#ProvisionedModelIdentifier": { "type": "string", "traits": { "smithy.api#length": { "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$" + "smithy.api#pattern": "^((([0-9a-zA-Z][_-]?){1,63})|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:provisioned-model/[a-z0-9]{12}))$" } }, - "com.amazonaws.bedrockagent#MongoDbAtlasCollectionName": { + "com.amazonaws.bedrockagent#RdsArn": { "type": "string", "traits": { - "smithy.api#length": { - "max": 63 - }, - "smithy.api#pattern": "^.*$" + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):rds:[a-zA-Z0-9-]*:[0-9]{12}:cluster:[a-zA-Z0-9-]{1,63}$" } }, - "com.amazonaws.bedrockagent#MongoDbAtlasConfiguration": { + "com.amazonaws.bedrockagent#RdsConfiguration": { "type": "structure", "members": { - "endpoint": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasEndpoint", - "traits": { - "smithy.api#documentation": "

The endpoint URL of your MongoDB Atlas cluster for your knowledge base.

", - "smithy.api#required": {} - } - }, - "databaseName": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasDatabaseName", + "resourceArn": { + "target": "com.amazonaws.bedrockagent#RdsArn", "traits": { - "smithy.api#documentation": "

The database name in your MongoDB Atlas cluster for your knowledge base.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the vector store.

", "smithy.api#required": {} } }, - "collectionName": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasCollectionName", + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", "traits": { - "smithy.api#documentation": "

The collection name of the knowledge base in MongoDB Atlas.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Amazon RDS database.

", "smithy.api#required": {} } }, - "vectorIndexName": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasIndexName", + "databaseName": { + "target": "com.amazonaws.bedrockagent#RdsDatabaseName", "traits": { - "smithy.api#documentation": "

The name of the MongoDB Atlas vector search index.

", + "smithy.api#documentation": "

The name of your Amazon RDS database.

", "smithy.api#required": {} } }, - "credentialsSecretArn": { - "target": "com.amazonaws.bedrockagent#SecretArn", + "tableName": { + "target": "com.amazonaws.bedrockagent#RdsTableName", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that contains user credentials for your MongoDB Atlas cluster.

", + "smithy.api#documentation": "

The name of the table in the database.

", "smithy.api#required": {} } }, "fieldMapping": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasFieldMapping", + "target": "com.amazonaws.bedrockagent#RdsFieldMapping", "traits": { "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", "smithy.api#required": {} } - }, - "endpointServiceName": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasEndpointServiceName", - "traits": { - "smithy.api#documentation": "

The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.

" - } } }, "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in MongoDB Atlas.
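A sketch of the corresponding Soto value, assuming the generated memberwise initializer mirrors the member names in this shape; every string below is a placeholder for your own cluster, secret and index.

```swift
import SotoBedrockAgent

// Sketch: MongoDB Atlas vector store settings for a knowledge base.
let mongoDbAtlas = BedrockAgent.MongoDbAtlasConfiguration(
    collectionName: "kb-chunks",
    credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:atlas-credentials-AbCdEf",
    databaseName: "knowledge-base",
    endpoint: "my-cluster.abc123.mongodb.net",
    fieldMapping: .init(metadataField: "metadata", textField: "text", vectorField: "embedding"),
    vectorIndexName: "kb-vector-index"
)
```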

" + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see Create a vector index in Amazon RDS.

" } }, - "com.amazonaws.bedrockagent#MongoDbAtlasDatabaseName": { + "com.amazonaws.bedrockagent#RdsDatabaseName": { "type": "string", "traits": { "smithy.api#length": { "max": 63 }, - "smithy.api#pattern": "^.*$" - } - }, - "com.amazonaws.bedrockagent#MongoDbAtlasEndpoint": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 - }, - "smithy.api#pattern": "^.*$" - } - }, - "com.amazonaws.bedrockagent#MongoDbAtlasEndpointServiceName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 - }, - "smithy.api#pattern": "^(?:arn:aws(?:-us-gov|-cn|-iso|-iso-[a-z])*:.+:.*:\\d+:.+/.+$|[a-zA-Z0-9*]+[a-zA-Z0-9._-]*)$" + "smithy.api#pattern": "^[a-zA-Z0-9_\\-]+$" } }, - "com.amazonaws.bedrockagent#MongoDbAtlasFieldMapping": { + "com.amazonaws.bedrockagent#RdsFieldMapping": { "type": "structure", "members": { + "primaryKeyField": { + "target": "com.amazonaws.bedrockagent#ColumnName", + "traits": { + "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the ID for each entry.

", + "smithy.api#required": {} + } + }, "vectorField": { - "target": "com.amazonaws.bedrockagent#FieldName", + "target": "com.amazonaws.bedrockagent#ColumnName", "traits": { "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.

", "smithy.api#required": {} } }, "textField": { - "target": "com.amazonaws.bedrockagent#FieldName", + "target": "com.amazonaws.bedrockagent#ColumnName", "traits": { "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", "smithy.api#required": {} } }, "metadataField": { - "target": "com.amazonaws.bedrockagent#FieldName", + "target": "com.amazonaws.bedrockagent#ColumnName", "traits": { "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", "smithy.api#required": {} @@ -6096,71 +10917,60 @@ "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" } }, - "com.amazonaws.bedrockagent#MongoDbAtlasIndexName": { + "com.amazonaws.bedrockagent#RdsTableName": { "type": "string", "traits": { "smithy.api#length": { - "max": 2048 + "max": 63 }, - "smithy.api#pattern": "^.*$" - } - }, - "com.amazonaws.bedrockagent#Name": { - "type": "string", - "traits": { - "smithy.api#pattern": "^([0-9a-zA-Z][_-]?){1,100}$" + "smithy.api#pattern": "^[a-zA-Z0-9_\\.\\-]+$" } }, - "com.amazonaws.bedrockagent#NextToken": { + "com.amazonaws.bedrockagent#RecommendedAction": { "type": "string", "traits": { "smithy.api#length": { - "min": 1, "max": 2048 - }, - "smithy.api#pattern": "^\\S*$" - } - }, - "com.amazonaws.bedrockagent#NonBlankString": { - "type": "string", - "traits": { - "smithy.api#pattern": "^[\\s\\S]+$" - } - }, - "com.amazonaws.bedrockagent#NumericalVersion": { - "type": "string", - "traits": { - "smithy.api#pattern": "^[0-9]{1,5}$" + } } }, - "com.amazonaws.bedrockagent#OpenSearchServerlessCollectionArn": { - "type": "string", + "com.amazonaws.bedrockagent#RecommendedActions": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#RecommendedAction" + }, "traits": { "smithy.api#length": { "max": 2048 - }, - "smithy.api#pattern": "^arn:aws:aoss:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:collection/[a-z0-9-]{3,32}$" + } } }, - "com.amazonaws.bedrockagent#OpenSearchServerlessConfiguration": { + "com.amazonaws.bedrockagent#RedisEnterpriseCloudConfiguration": { "type": "structure", "members": { - "collectionArn": { - "target": "com.amazonaws.bedrockagent#OpenSearchServerlessCollectionArn", + "endpoint": { + "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudEndpoint", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the OpenSearch Service vector store.

", + "smithy.api#documentation": "

The endpoint URL of the Redis Enterprise Cloud database.

", "smithy.api#required": {} } }, "vectorIndexName": { - "target": "com.amazonaws.bedrockagent#OpenSearchServerlessIndexName", + "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudIndexName", "traits": { - "smithy.api#documentation": "

The name of the vector store.

", + "smithy.api#documentation": "

The name of the vector index.

", + "smithy.api#required": {} + } + }, + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Redis Enterprise Cloud database.

", "smithy.api#required": {} } }, "fieldMapping": { - "target": "com.amazonaws.bedrockagent#OpenSearchServerlessFieldMapping", + "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudFieldMapping", "traits": { "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", "smithy.api#required": {} @@ -6168,10 +10978,19 @@ } }, "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon OpenSearch Service. For more information, see Create a vector index in Amazon OpenSearch Service.

" + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Redis Enterprise Cloud. For more information, see Create a vector index in Redis Enterprise Cloud.

" } }, - "com.amazonaws.bedrockagent#OpenSearchServerlessFieldMapping": { + "com.amazonaws.bedrockagent#RedisEnterpriseCloudEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.bedrockagent#RedisEnterpriseCloudFieldMapping": { "type": "structure", "members": { "vectorField": { @@ -6200,7 +11019,7 @@ "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" } }, - "com.amazonaws.bedrockagent#OpenSearchServerlessIndexName": { + "com.amazonaws.bedrockagent#RedisEnterpriseCloudIndexName": { "type": "string", "traits": { "smithy.api#length": { @@ -6209,697 +11028,906 @@ "smithy.api#pattern": "^.*$" } }, - "com.amazonaws.bedrockagent#ParameterDescription": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 500 + "com.amazonaws.bedrockagent#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagent#NonBlankString" } + }, + "traits": { + "smithy.api#documentation": "

The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 } }, - "com.amazonaws.bedrockagent#ParameterDetail": { + "com.amazonaws.bedrockagent#RetrievalFlowNodeConfiguration": { "type": "structure", "members": { - "description": { - "target": "com.amazonaws.bedrockagent#ParameterDescription", + "serviceConfiguration": { + "target": "com.amazonaws.bedrockagent#RetrievalFlowNodeServiceConfiguration", "traits": { - "smithy.api#documentation": "

A description of the parameter. Helps the foundation model determine how to elicit the parameters from the user.

" + "smithy.api#documentation": "

Contains configurations for the service to use for retrieving data to return as the output from the node.

", + "smithy.api#required": {} } - }, - "type": { - "target": "com.amazonaws.bedrockagent#Type", + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for a Retrieval node in a flow. This node retrieves data from the Amazon S3 location that you specify and returns it as the output.

" + } + }, + "com.amazonaws.bedrockagent#RetrievalFlowNodeS3Configuration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.bedrockagent#S3BucketName", "traits": { - "smithy.api#documentation": "

The data type of the parameter.

", + "smithy.api#documentation": "

The name of the Amazon S3 bucket from which to retrieve data.

", "smithy.api#required": {} } - }, - "required": { - "target": "smithy.api#Boolean", + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node.

" + } + }, + "com.amazonaws.bedrockagent#RetrievalFlowNodeServiceConfiguration": { + "type": "union", + "members": { + "s3": { + "target": "com.amazonaws.bedrockagent#RetrievalFlowNodeS3Configuration", "traits": { - "smithy.api#documentation": "

Whether the parameter is required for the agent to complete the function for action group invocation.

" + "smithy.api#documentation": "

Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about a parameter in a function for an action group.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains configurations for the service to use for retrieving data to return as the output from the node.

" } }, - "com.amazonaws.bedrockagent#ParameterMap": { - "type": "map", - "key": { - "target": "com.amazonaws.bedrockagent#Name" - }, - "value": { - "target": "com.amazonaws.bedrockagent#ParameterDetail" + "com.amazonaws.bedrockagent#S3BucketArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):s3:::[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$" } }, - "com.amazonaws.bedrockagent#Payload": { + "com.amazonaws.bedrockagent#S3BucketName": { "type": "string", "traits": { - "smithy.api#sensitive": {} + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" } }, - "com.amazonaws.bedrockagent#PineconeConfiguration": { + "com.amazonaws.bedrockagent#S3BucketUri": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^s3://.{1,128}$" + } + }, + "com.amazonaws.bedrockagent#S3DataSourceConfiguration": { "type": "structure", "members": { - "connectionString": { - "target": "com.amazonaws.bedrockagent#PineconeConnectionString", + "bucketArn": { + "target": "com.amazonaws.bedrockagent#S3BucketArn", "traits": { - "smithy.api#documentation": "

The endpoint URL for your index management page.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the S3 bucket that contains your data.

", "smithy.api#required": {} } }, - "credentialsSecretArn": { - "target": "com.amazonaws.bedrockagent#SecretArn", + "inclusionPrefixes": { + "target": "com.amazonaws.bedrockagent#S3Prefixes", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Pinecone API key.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A list of S3 prefixes to include certain files or content. For more information, \n see Organizing objects using prefixes.

" } }, - "namespace": { - "target": "com.amazonaws.bedrockagent#PineconeNamespace", + "bucketOwnerAccountId": { + "target": "com.amazonaws.bedrockagent#BucketOwnerAccountId", "traits": { - "smithy.api#documentation": "

The namespace to be used to write new data to your database.

" + "smithy.api#documentation": "

The account ID for the owner of the S3 bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Amazon S3 as your data source.
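A sketch of this configuration in Swift; the bucket ARN, prefix and account ID are placeholders, and the `S3Prefixes` length trait above limits `inclusionPrefixes` to a single entry.

```swift
import SotoBedrockAgent

// Sketch: an S3 data source restricted to one prefix.
let s3DataSource = BedrockAgent.S3DataSourceConfiguration(
    bucketArn: "arn:aws:s3:::my-knowledge-base-bucket",
    bucketOwnerAccountId: "111122223333",
    inclusionPrefixes: ["docs/"]
)
```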

" + } + }, + "com.amazonaws.bedrockagent#S3Identifier": { + "type": "structure", + "members": { + "s3BucketName": { + "target": "com.amazonaws.bedrockagent#S3BucketName", + "traits": { + "smithy.api#documentation": "

The name of the S3 bucket.

" } }, - "fieldMapping": { - "target": "com.amazonaws.bedrockagent#PineconeFieldMapping", + "s3ObjectKey": { + "target": "com.amazonaws.bedrockagent#S3ObjectKey", "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", + "smithy.api#documentation": "

The S3 object key for the S3 resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The identifier information for an Amazon S3 bucket.

" + } + }, + "com.amazonaws.bedrockagent#S3Location": { + "type": "structure", + "members": { + "uri": { + "target": "com.amazonaws.bedrockagent#S3BucketUri", + "traits": { + "smithy.api#documentation": "

The location's URI. For example, s3://my-bucket/chunk-processor/.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Pinecone. For more information, see Create a vector index in Pinecone.

" + "smithy.api#documentation": "

An Amazon S3 location.

" } }, - "com.amazonaws.bedrockagent#PineconeConnectionString": { + "com.amazonaws.bedrockagent#S3ObjectKey": { "type": "string", "traits": { "smithy.api#length": { - "max": 2048 + "min": 1, + "max": 1024 }, - "smithy.api#pattern": "^.*$" + "smithy.api#pattern": "^[\\.\\-\\!\\*\\_\\'\\(\\)a-zA-Z0-9][\\.\\-\\!\\*\\_\\'\\(\\)\\/a-zA-Z0-9]*$" } }, - "com.amazonaws.bedrockagent#PineconeFieldMapping": { - "type": "structure", + "com.amazonaws.bedrockagent#S3Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 300 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagent#S3Prefixes": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#S3Prefix" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.bedrockagent#SalesforceAuthType": { + "type": "enum", "members": { - "textField": { - "target": "com.amazonaws.bedrockagent#FieldName", + "OAUTH2_CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", - "smithy.api#required": {} + "smithy.api#enumValue": "OAUTH2_CLIENT_CREDENTIALS" } - }, - "metadataField": { - "target": "com.amazonaws.bedrockagent#FieldName", + } + } + }, + "com.amazonaws.bedrockagent#SalesforceCrawlerConfiguration": { + "type": "structure", + "members": { + "filterConfiguration": { + "target": "com.amazonaws.bedrockagent#CrawlFilterConfiguration", "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration of filtering the Salesforce content. For example, \n configuring regular expression patterns to include or exclude certain \n content.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" + "smithy.api#documentation": "

The configuration of the Salesforce content. For example, configuring \n specific types of Salesforce content.

" } }, - "com.amazonaws.bedrockagent#PineconeNamespace": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 + "com.amazonaws.bedrockagent#SalesforceDataSourceConfiguration": { + "type": "structure", + "members": { + "sourceConfiguration": { + "target": "com.amazonaws.bedrockagent#SalesforceSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The endpoint information to connect to your Salesforce data source.

", + "smithy.api#required": {} + } }, - "smithy.api#pattern": "^.*$" + "crawlerConfiguration": { + "target": "com.amazonaws.bedrockagent#SalesforceCrawlerConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the Salesforce content. For example, configuring \n specific types of Salesforce content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration information to connect to Salesforce as your data source.

" } }, - "com.amazonaws.bedrockagent#PrepareAgent": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#PrepareAgentRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#PrepareAgentResponse" - }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#ConflictException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + "com.amazonaws.bedrockagent#SalesforceSourceConfiguration": { + "type": "structure", + "members": { + "hostUrl": { + "target": "com.amazonaws.bedrockagent#HttpsUrl", + "traits": { + "smithy.api#documentation": "

The Salesforce host URL or instance URL.

", + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" + "authType": { + "target": "com.amazonaws.bedrockagent#SalesforceAuthType", + "traits": { + "smithy.api#documentation": "

The supported authentication type to authenticate and connect to your \n Salesforce instance.

", + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of a Secrets Manager secret that \n stores your authentication credentials for your Salesforce instance. \n For more information on the key-value pairs that must be included in \n your secret, depending on your authentication type, see \n Salesforce connection configuration.

", + "smithy.api#required": {} + } } - ], - "traits": { - "smithy.api#documentation": "

Creates a DRAFT version of the agent that can be used for internal testing.

", - "smithy.api#http": { - "code": 202, - "method": "POST", - "uri": "/agents/{agentId}/" - }, - "smithy.api#tags": [ - "console" - ] + }, + "traits": { + "smithy.api#documentation": "

The endpoint information to connect to your Salesforce data source.

" } }, - "com.amazonaws.bedrockagent#PrepareAgentRequest": { + "com.amazonaws.bedrockagent#SecretArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$" + } + }, + "com.amazonaws.bedrockagent#SeedUrl": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "url": { + "target": "com.amazonaws.bedrockagent#Url", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent for which to create a DRAFT version.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

A seed or starting point URL.

" } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

The seed or starting point URL. \n You should be authorized to crawl the URL.

" } }, - "com.amazonaws.bedrockagent#PrepareAgentResponse": { + "com.amazonaws.bedrockagent#SeedUrls": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#SeedUrl" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.bedrockagent#SemanticChunkingConfiguration": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the agent for which the DRAFT version was created.

", - "smithy.api#required": {} - } - }, - "agentStatus": { - "target": "com.amazonaws.bedrockagent#AgentStatus", + "maxTokens": { + "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The status of the DRAFT version and whether it is ready for use.

", + "smithy.api#documentation": "

The maximum number of tokens that a chunk can contain.

", + "smithy.api#range": { + "min": 1 + }, "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#Version", + "bufferSize": { + "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The version of the agent.

", + "smithy.api#documentation": "

The buffer size.

", + "smithy.api#range": { + "min": 0, + "max": 1 + }, "smithy.api#required": {} } }, - "preparedAt": { - "target": "com.amazonaws.bedrockagent#DateTimestamp", + "breakpointPercentileThreshold": { + "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The time at which the DRAFT version of the agent was last prepared.

", + "smithy.api#documentation": "

The dissimilarity threshold for splitting chunks.

", + "smithy.api#range": { + "min": 50, + "max": 99 + }, "smithy.api#required": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Settings for semantic document chunking for a data source. Semantic chunking splits\n a document into smaller documents based on groups of similar content derived from the text\n with natural language processing.

\n

With semantic chunking, each sentence is compared to the next to determine how similar they are.\n You specify a threshold in the form of a percentile, where adjacent sentences that are less similar than\n that percentage of sentence pairs are divided into separate chunks. For example, if you set the threshold to \n 90, then the 10 percent of sentence pairs that are least similar are split. So if you have 101 sentences,\n 100 sentence pairs are compared, and the 10 with the least similarity are split, creating 11 chunks. These\n chunks are further split if they exceed the max token size.

\n

You must also specify a buffer size, which determines whether sentences are compared in isolation, or\n within a moving context window that includes the previous and following sentence. For example, if you set\n the buffer size to 1, the embedding for sentence 10 is derived from sentences 9, 10, and 11\n combined.
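For reference, a minimal Soto sketch of the chunking settings just described, assuming the generated `SotoBedrockAgent` module exposes `BedrockAgent.SemanticChunkingConfiguration` with the members listed in this shape (the 300-token cap is a placeholder).

```swift
import SotoBedrockAgent

// Sketch: semantic chunking tuned as in the explanation above, with a
// 90th-percentile breakpoint threshold, a one-sentence buffer on each side,
// and chunks capped at 300 tokens (placeholder value).
let semanticChunking = BedrockAgent.SemanticChunkingConfiguration(
    breakpointPercentileThreshold: 90,
    bufferSize: 1,
    maxTokens: 300
)
```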

" } }, - "com.amazonaws.bedrockagent#PromptConfiguration": { + "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration": { "type": "structure", "members": { - "promptType": { - "target": "com.amazonaws.bedrockagent#PromptType", - "traits": { - "smithy.api#documentation": "

The step in the agent sequence that this prompt configuration applies to.

" - } - }, - "promptCreationMode": { - "target": "com.amazonaws.bedrockagent#CreationMode", + "kmsKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

Specifies whether to override the default prompt template for this promptType. Set this value to OVERRIDDEN to use the prompt that you provide in the basePromptTemplate. If you leave it as DEFAULT, the agent uses a default prompt template.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the resource.

" } - }, - "promptState": { - "target": "com.amazonaws.bedrockagent#PromptState", + } + }, + "traits": { + "smithy.api#documentation": "

Contains the configuration for server-side encryption.

" + } + }, + "com.amazonaws.bedrockagent#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagent#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

The number of requests exceeds the service quota. Resubmit your request later.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.bedrockagent#SessionTTL": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 60, + "max": 3600 + } + } + }, + "com.amazonaws.bedrockagent#SharePointAuthType": { + "type": "enum", + "members": { + "OAUTH2_CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Specifies whether to allow the agent to carry out the step specified in the promptType. If you set this value to DISABLED, the agent skips that step. The default state for each promptType is as follows.

• PRE_PROCESSING – ENABLED
• ORCHESTRATION – ENABLED
• KNOWLEDGE_BASE_RESPONSE_GENERATION – ENABLED
• POST_PROCESSING – DISABLED
" + "smithy.api#enumValue": "OAUTH2_CLIENT_CREDENTIALS" } - }, - "basePromptTemplate": { - "target": "com.amazonaws.bedrockagent#BasePromptTemplate", + } + } + }, + "com.amazonaws.bedrockagent#SharePointCrawlerConfiguration": { + "type": "structure", + "members": { + "filterConfiguration": { + "target": "com.amazonaws.bedrockagent#CrawlFilterConfiguration", "traits": { - "smithy.api#documentation": "

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables and Configure the prompt templates.

" + "smithy.api#documentation": "

The configuration of filtering the SharePoint content. For example, \n configuring regular expression patterns to include or exclude certain content.

" } - }, - "inferenceConfiguration": { - "target": "com.amazonaws.bedrockagent#InferenceConfiguration", + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the SharePoint content. For example, configuring \n specific types of SharePoint content.

" + } + }, + "com.amazonaws.bedrockagent#SharePointDataSourceConfiguration": { + "type": "structure", + "members": { + "sourceConfiguration": { + "target": "com.amazonaws.bedrockagent#SharePointSourceConfiguration", "traits": { - "smithy.api#documentation": "

Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models.

" + "smithy.api#documentation": "

The endpoint information to connect to your SharePoint data source.

", + "smithy.api#required": {} } }, - "parserMode": { - "target": "com.amazonaws.bedrockagent#CreationMode", + "crawlerConfiguration": { + "target": "com.amazonaws.bedrockagent#SharePointCrawlerConfiguration", "traits": { - "smithy.api#documentation": "

Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the promptType. If you set the field as OVERRIDDEN, the overrideLambda field in the PromptOverrideConfiguration must be specified with the ARN of a Lambda function.

" + "smithy.api#documentation": "

The configuration of the SharePoint content. For example, configuring \n specific types of SharePoint content.

" } } }, "traits": { - "smithy.api#documentation": "

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.

" + "smithy.api#documentation": "

The configuration information to connect to SharePoint as your data source.

" } }, - "com.amazonaws.bedrockagent#PromptConfigurations": { + "com.amazonaws.bedrockagent#SharePointDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.bedrockagent#SharePointHostType": { + "type": "enum", + "members": { + "ONLINE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ONLINE" + } + } + } + }, + "com.amazonaws.bedrockagent#SharePointSiteUrls": { "type": "list", "member": { - "target": "com.amazonaws.bedrockagent#PromptConfiguration" + "target": "com.amazonaws.bedrockagent#HttpsUrl" }, "traits": { "smithy.api#length": { - "max": 10 + "min": 1, + "max": 100 } } }, - "com.amazonaws.bedrockagent#PromptOverrideConfiguration": { + "com.amazonaws.bedrockagent#SharePointSourceConfiguration": { "type": "structure", "members": { - "promptConfigurations": { - "target": "com.amazonaws.bedrockagent#PromptConfigurations", + "tenantId": { + "target": "com.amazonaws.bedrockagent#Microsoft365TenantId", "traits": { - "smithy.api#documentation": "

Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts.

", + "smithy.api#documentation": "

The identifier of your Microsoft 365 tenant.

" + } + }, + "domain": { + "target": "com.amazonaws.bedrockagent#SharePointDomain", + "traits": { + "smithy.api#documentation": "

The domain of your SharePoint instance or site URL/URLs.

", "smithy.api#required": {} } }, - "overrideLambda": { - "target": "com.amazonaws.bedrockagent#LambdaArn", + "siteUrls": { + "target": "com.amazonaws.bedrockagent#SharePointSiteUrls", "traits": { - "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

" + "smithy.api#documentation": "

A list of one or more SharePoint site URLs.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.
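A hedged Swift sketch, assuming the Soto enum cases for `CreationMode`, `PromptState` and `PromptType` are the usual camel-cased forms of the values shown in this model; the template string is a placeholder.

```swift
import SotoBedrockAgent

// Sketch: override only the orchestration prompt, leaving the parser and the
// remaining prompt types at their defaults.
let promptOverride = BedrockAgent.PromptOverrideConfiguration(
    promptConfigurations: [
        .init(
            basePromptTemplate: "placeholder orchestration template with the required variables",
            promptCreationMode: .overridden,
            promptState: .enabled,
            promptType: .orchestration
        )
    ]
)
```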

", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.bedrockagent#PromptState": { - "type": "enum", - "members": { - "ENABLED": { - "target": "smithy.api#Unit", + }, + "hostType": { + "target": "com.amazonaws.bedrockagent#SharePointHostType", "traits": { - "smithy.api#enumValue": "ENABLED" + "smithy.api#documentation": "

The supported host type, whether online/cloud or server/on-premises.

", + "smithy.api#required": {} } }, - "DISABLED": { - "target": "smithy.api#Unit", + "authType": { + "target": "com.amazonaws.bedrockagent#SharePointAuthType", "traits": { - "smithy.api#enumValue": "DISABLED" + "smithy.api#documentation": "

The supported authentication type to authenticate and connect \n to your SharePoint site/sites.

", + "smithy.api#required": {} + } + }, + "credentialsSecretArn": { + "target": "com.amazonaws.bedrockagent#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of a Secrets Manager secret that \n stores your authentication credentials for your SharePoint site/sites. \n For more information on the key-value pairs that must be included in \n your secret, depending on your authentication type, see \n SharePoint connection configuration.

", + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#documentation": "

The endpoint information to connect to your SharePoint data source.
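A sketch of this shape in Swift; the tenant ID, domain, site URL and secret ARN are placeholders, and the `.oauth2ClientCredentials` and `.online` case names assume Soto's usual camel-casing of the enum values above.

```swift
import SotoBedrockAgent

// Sketch: connection details for a SharePoint Online data source.
let sharePointSource = BedrockAgent.SharePointSourceConfiguration(
    authType: .oauth2ClientCredentials,
    credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:sharepoint-oauth-AbCdEf",
    domain: "examplecorp",
    hostType: .online,
    siteUrls: ["https://examplecorp.sharepoint.com/sites/knowledge"],
    tenantId: "11111111-2222-3333-4444-555555555555"
)
```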

" } }, - "com.amazonaws.bedrockagent#PromptType": { + "com.amazonaws.bedrockagent#SortOrder": { "type": "enum", "members": { - "PRE_PROCESSING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PRE_PROCESSING" - } - }, - "ORCHESTRATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ORCHESTRATION" - } - }, - "POST_PROCESSING": { + "ASCENDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "POST_PROCESSING" + "smithy.api#enumValue": "ASCENDING" } }, - "KNOWLEDGE_BASE_RESPONSE_GENERATION": { + "DESCENDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "KNOWLEDGE_BASE_RESPONSE_GENERATION" + "smithy.api#enumValue": "DESCENDING" } } } }, - "com.amazonaws.bedrockagent#ProvisionedModelIdentifier": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 2048 + "com.amazonaws.bedrockagent#StartIngestionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#StartIngestionJobRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#StartIngestionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - "smithy.api#pattern": "^((([0-9a-zA-Z][_-]?){1,63})|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:provisioned-model/[a-z0-9]{12}))$" - } - }, - "com.amazonaws.bedrockagent#RdsArn": { - "type": "string", + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], "traits": { - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):rds:[a-zA-Z0-9-]*:[0-9]{12}:cluster:[a-zA-Z0-9-]{1,63}$" + "smithy.api#documentation": "

Begins an ingestion job, in which a data source is added to a knowledge base.
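A usage sketch with placeholder identifiers; `clientToken` is left to its generated default because the member carries the idempotency-token trait.

```swift
import SotoBedrockAgent

// Sketch: start an ingestion (sync) job for one data source of a knowledge base.
func startSync(_ bedrockAgent: BedrockAgent) async throws {
    let response = try await bedrockAgent.startIngestionJob(.init(
        dataSourceId: "EXAMPLE0DS",
        description: "Nightly refresh",
        knowledgeBaseId: "EXAMPLE0KB"
    ))
    print("Started ingestion job:", response.ingestionJob)
}
```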

", + "smithy.api#http": { + "code": 202, + "method": "PUT", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] } }, - "com.amazonaws.bedrockagent#RdsConfiguration": { + "com.amazonaws.bedrockagent#StartIngestionJobRequest": { "type": "structure", "members": { - "resourceArn": { - "target": "com.amazonaws.bedrockagent#RdsArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the vector store.

", - "smithy.api#required": {} - } - }, - "credentialsSecretArn": { - "target": "com.amazonaws.bedrockagent#SecretArn", + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Amazon RDS database.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base to which to add the data source.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "databaseName": { - "target": "com.amazonaws.bedrockagent#RdsDatabaseName", + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The name of your Amazon RDS database.

", + "smithy.api#documentation": "

The unique identifier of the data source to ingest.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "tableName": { - "target": "com.amazonaws.bedrockagent#RdsTableName", + "clientToken": { + "target": "com.amazonaws.bedrockagent#ClientToken", "traits": { - "smithy.api#documentation": "

The name of the table in the database.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} } }, - "fieldMapping": { - "target": "com.amazonaws.bedrockagent#RdsFieldMapping", + "description": { + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A description of the ingestion job.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see Create a vector index in Amazon RDS.

" - } - }, - "com.amazonaws.bedrockagent#RdsDatabaseName": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 63 - }, - "smithy.api#pattern": "^[a-zA-Z0-9_\\-]+$" + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#RdsFieldMapping": { + "com.amazonaws.bedrockagent#StartIngestionJobResponse": { "type": "structure", "members": { - "primaryKeyField": { - "target": "com.amazonaws.bedrockagent#ColumnName", - "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the ID for each entry.

", - "smithy.api#required": {} - } - }, - "vectorField": { - "target": "com.amazonaws.bedrockagent#ColumnName", - "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.

", - "smithy.api#required": {} - } - }, - "textField": { - "target": "com.amazonaws.bedrockagent#ColumnName", - "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", - "smithy.api#required": {} - } - }, - "metadataField": { - "target": "com.amazonaws.bedrockagent#ColumnName", + "ingestionJob": { + "target": "com.amazonaws.bedrockagent#IngestionJob", "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", + "smithy.api#documentation": "

An object containing information about the ingestion job.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" - } - }, - "com.amazonaws.bedrockagent#RdsTableName": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 63 - }, - "smithy.api#pattern": "^[a-zA-Z0-9_\\.\\-]+$" + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#RecommendedAction": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 2048 + "com.amazonaws.bedrockagent#StepType": { + "type": "enum", + "members": { + "POST_CHUNKING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POST_CHUNKING" + } } } }, - "com.amazonaws.bedrockagent#RecommendedActions": { + "com.amazonaws.bedrockagent#StopSequences": { "type": "list", "member": { - "target": "com.amazonaws.bedrockagent#RecommendedAction" + "target": "smithy.api#String" }, "traits": { "smithy.api#length": { - "max": 2048 + "min": 0, + "max": 4 } } }, - "com.amazonaws.bedrockagent#RedisEnterpriseCloudConfiguration": { + "com.amazonaws.bedrockagent#StorageConfiguration": { "type": "structure", "members": { - "endpoint": { - "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudEndpoint", + "type": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseStorageType", "traits": { - "smithy.api#documentation": "

The endpoint URL of the Redis Enterprise Cloud database.

", + "smithy.api#documentation": "

The vector store service in which the knowledge base is stored.

", "smithy.api#required": {} } }, - "vectorIndexName": { - "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudIndexName", + "opensearchServerlessConfiguration": { + "target": "com.amazonaws.bedrockagent#OpenSearchServerlessConfiguration", "traits": { - "smithy.api#documentation": "

The name of the vector index.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.

" } }, - "credentialsSecretArn": { - "target": "com.amazonaws.bedrockagent#SecretArn", + "pineconeConfiguration": { + "target": "com.amazonaws.bedrockagent#PineconeConfiguration", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret that you created in Secrets Manager that is linked to your Redis Enterprise Cloud database.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Pinecone.

" } }, - "fieldMapping": { - "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudFieldMapping", + "redisEnterpriseCloudConfiguration": { + "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudConfiguration", "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Redis Enterprise Cloud.

" + } + }, + "rdsConfiguration": { + "target": "com.amazonaws.bedrockagent#RdsConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see Create a vector index in Amazon RDS.

" + } + }, + "mongoDbAtlasConfiguration": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasConfiguration", + "traits": { + "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in MongoDB Atlas.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Redis Enterprise Cloud. For more information, see Create a vector index in Redis Enterprise Cloud.

" + "smithy.api#documentation": "

Contains the storage configuration of the knowledge base.
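A hedged sketch of an RDS-backed storage configuration; the `.rds` case assumes a `KnowledgeBaseStorageType` value of `RDS`, which is not shown in this excerpt, and all ARNs and names are placeholders.

```swift
import SotoBedrockAgent

// Sketch: knowledge base storage backed by an Aurora PostgreSQL cluster.
// `type` selects which of the optional per-store members is read.
let storage = BedrockAgent.StorageConfiguration(
    rdsConfiguration: .init(
        credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:aurora-kb-AbCdEf",
        databaseName: "bedrock_kb",
        fieldMapping: .init(
            metadataField: "metadata",
            primaryKeyField: "id",
            textField: "chunks",
            vectorField: "embedding"
        ),
        resourceArn: "arn:aws:rds:us-east-1:111122223333:cluster:my-aurora-cluster",
        tableName: "bedrock_kb.kb_table"
    ),
    type: .rds
)
```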

" } }, - "com.amazonaws.bedrockagent#RedisEnterpriseCloudEndpoint": { - "type": "string", + "com.amazonaws.bedrockagent#StorageDays": { + "type": "integer", "traits": { - "smithy.api#length": { - "max": 2048 - }, - "smithy.api#pattern": "^.*$" + "smithy.api#default": 30, + "smithy.api#range": { + "min": 0, + "max": 30 + } } }, - "com.amazonaws.bedrockagent#RedisEnterpriseCloudFieldMapping": { + "com.amazonaws.bedrockagent#StorageFlowNodeConfiguration": { "type": "structure", "members": { - "vectorField": { - "target": "com.amazonaws.bedrockagent#FieldName", - "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.

", - "smithy.api#required": {} - } - }, - "textField": { - "target": "com.amazonaws.bedrockagent#FieldName", - "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.

", - "smithy.api#required": {} - } - }, - "metadataField": { - "target": "com.amazonaws.bedrockagent#FieldName", + "serviceConfiguration": { + "target": "com.amazonaws.bedrockagent#StorageFlowNodeServiceConfiguration", "traits": { - "smithy.api#documentation": "

The name of the field in which Amazon Bedrock stores metadata about the vector store.

", + "smithy.api#documentation": "

Contains configurations for the service to use for storing the input into the node.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains the names of the fields to which to map information about the vector store.

" + "smithy.api#documentation": "

Contains configurations for a Storage node in a flow. This node stores the input in an Amazon S3 location that you specify.

" } }, - "com.amazonaws.bedrockagent#RedisEnterpriseCloudIndexName": { - "type": "string", + "com.amazonaws.bedrockagent#StorageFlowNodeS3Configuration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.bedrockagent#S3BucketName", + "traits": { + "smithy.api#documentation": "

The name of the Amazon S3 bucket in which to store the input into the node.

", + "smithy.api#required": {} + } + } + }, "traits": { - "smithy.api#length": { - "max": 2048 - }, - "smithy.api#pattern": "^.*$" + "smithy.api#documentation": "

Contains configurations for the Amazon S3 location in which to store the input into the node.

" } }, - "com.amazonaws.bedrockagent#ResourceNotFoundException": { - "type": "structure", + "com.amazonaws.bedrockagent#StorageFlowNodeServiceConfiguration": { + "type": "union", "members": { - "message": { - "target": "com.amazonaws.bedrockagent#NonBlankString" + "s3": { + "target": "com.amazonaws.bedrockagent#StorageFlowNodeS3Configuration", + "traits": { + "smithy.api#documentation": "

Contains configurations for the Amazon S3 location in which to store the input into the node.

" + } } }, "traits": { - "smithy.api#documentation": "

The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again.

", - "smithy.api#error": "client", - "smithy.api#httpError": 404 + "smithy.api#documentation": "

Contains configurations for the service to use for storing the input into the node.

" } }, - "com.amazonaws.bedrockagent#S3BucketArn": { + "com.amazonaws.bedrockagent#TagKey": { "type": "string", "traits": { "smithy.api#length": { "min": 1, - "max": 2048 + "max": 128 }, - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):s3:::[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$" + "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+@-]*$" } }, - "com.amazonaws.bedrockagent#S3BucketName": { - "type": "string", + "com.amazonaws.bedrockagent#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagent#TagKey" + }, "traits": { "smithy.api#length": { - "min": 3, - "max": 63 + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.bedrockagent#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - "smithy.api#pattern": "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
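A usage sketch; the agent ARN is a placeholder shaped after the `TaggableResourcesArn` pattern.

```swift
import SotoBedrockAgent

// Sketch: attach tags to a taggable Bedrock resource such as an agent.
func tagAgent(_ bedrockAgent: BedrockAgent) async throws {
    _ = try await bedrockAgent.tagResource(.init(
        resourceArn: "arn:aws:bedrock:us-east-1:111122223333:agent/EXAMPLE0AG",
        tags: ["project": "support-bot", "stage": "dev"]
    ))
}
```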

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/tags/{resourceArn}" + } } }, - "com.amazonaws.bedrockagent#S3DataSourceConfiguration": { + "com.amazonaws.bedrockagent#TagResourceRequest": { "type": "structure", "members": { - "bucketArn": { - "target": "com.amazonaws.bedrockagent#S3BucketArn", + "resourceArn": { + "target": "com.amazonaws.bedrockagent#TaggableResourcesArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the bucket that contains the data source.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to tag.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "inclusionPrefixes": { - "target": "com.amazonaws.bedrockagent#S3Prefixes", - "traits": { - "smithy.api#documentation": "

A list of S3 prefixes that define the object containing the data sources. For more information, see Organizing objects using prefixes.

" - } - }, - "bucketOwnerAccountId": { - "target": "com.amazonaws.bedrockagent#BucketOwnerAccountId", + "tags": { + "target": "com.amazonaws.bedrockagent#TagsMap", "traits": { - "smithy.api#documentation": "

The bucket account owner ID for the S3 bucket.

" + "smithy.api#documentation": "

An object containing key-value pairs that define the tags to attach to the resource.

", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains information about the S3 configuration of the data source.

" + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#S3Identifier": { + "com.amazonaws.bedrockagent#TagResourceResponse": { "type": "structure", - "members": { - "s3BucketName": { - "target": "com.amazonaws.bedrockagent#S3BucketName", - "traits": { - "smithy.api#documentation": "

The name of the S3 bucket.

" - } - }, - "s3ObjectKey": { - "target": "com.amazonaws.bedrockagent#S3ObjectKey", - "traits": { - "smithy.api#documentation": "

The S3 object key containing the resource.

" - } - } - }, + "members": {}, "traits": { - "smithy.api#documentation": "

Contains information about the S3 object containing the resource.

" + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#S3ObjectKey": { + "com.amazonaws.bedrockagent#TagValue": { "type": "string", "traits": { "smithy.api#length": { - "min": 1, - "max": 1024 + "min": 0, + "max": 256 }, - "smithy.api#pattern": "^[\\.\\-\\!\\*\\_\\'\\(\\)a-zA-Z0-9][\\.\\-\\!\\*\\_\\'\\(\\)\\/a-zA-Z0-9]*$" + "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+@-]*$" } }, - "com.amazonaws.bedrockagent#S3Prefix": { + "com.amazonaws.bedrockagent#TaggableResourcesArn": { "type": "string", "traits": { "smithy.api#length": { - "min": 1, - "max": 300 - } + "min": 20, + "max": 1011 + }, + "smithy.api#pattern": "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base|flow|prompt)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:flow/([A-Z0-9]{10})/alias/([A-Z0-9]{10})$|^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:prompt/([A-Z0-9]{10})?(?::/d+)?$)" } }, - "com.amazonaws.bedrockagent#S3Prefixes": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#S3Prefix" + "com.amazonaws.bedrockagent#TaggingResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockagent#ListTagsForResource" + }, + { + "target": "com.amazonaws.bedrockagent#TagResource" + }, + { + "target": "com.amazonaws.bedrockagent#UntagResource" + } + ] + }, + "com.amazonaws.bedrockagent#TagsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.bedrockagent#TagKey" }, + "value": { + "target": "com.amazonaws.bedrockagent#TagValue" + } + }, + "com.amazonaws.bedrockagent#Temperature": { + "type": "float", "traits": { - "smithy.api#length": { - "min": 1, + "smithy.api#range": { + "min": 0, "max": 1 } } }, - "com.amazonaws.bedrockagent#SecretArn": { + "com.amazonaws.bedrockagent#TextPrompt": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):secretsmanager:[a-z0-9-]{1,20}:([0-9]{12}|):secret:[a-zA-Z0-9!/_+=.@-]{1,512}$" + "smithy.api#length": { + "min": 1, + "max": 200000 + }, + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration": { + "com.amazonaws.bedrockagent#TextPromptTemplateConfiguration": { "type": "structure", "members": { - "kmsKeyArn": { - "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "text": { + "target": "com.amazonaws.bedrockagent#TextPrompt", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the resource.

" + "smithy.api#documentation": "

The message for the prompt.

", + "smithy.api#required": {} + } + }, + "inputVariables": { + "target": "com.amazonaws.bedrockagent#PromptInputVariablesList", + "traits": { + "smithy.api#documentation": "

An array of the variables in the prompt template.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the configuration for server-side encryption.

" + "smithy.api#documentation": "

Contains configurations for a text prompt template. To include a variable, enclose a word in double curly braces as in {{variable}}.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagent#ServiceQuotaExceededException": { + "com.amazonaws.bedrockagent#ThrottlingException": { "type": "structure", "members": { "message": { @@ -6907,219 +11935,135 @@ } }, "traits": { - "smithy.api#documentation": "

The number of requests exceeds the service quota. Resubmit your request later.

", + "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

", "smithy.api#error": "client", - "smithy.api#httpError": 402 + "smithy.api#httpError": 429 } }, - "com.amazonaws.bedrockagent#SessionTTL": { + "com.amazonaws.bedrockagent#TopK": { "type": "integer", "traits": { "smithy.api#range": { - "min": 60, - "max": 3600 + "min": 0, + "max": 500 } } }, - "com.amazonaws.bedrockagent#SortOrder": { - "type": "enum", + "com.amazonaws.bedrockagent#TopP": { + "type": "float", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.bedrockagent#Transformation": { + "type": "structure", "members": { - "ASCENDING": { - "target": "smithy.api#Unit", + "transformationFunction": { + "target": "com.amazonaws.bedrockagent#TransformationFunction", "traits": { - "smithy.api#enumValue": "ASCENDING" + "smithy.api#documentation": "

A Lambda function that processes documents.

", + "smithy.api#required": {} } }, - "DESCENDING": { - "target": "smithy.api#Unit", + "stepToApply": { + "target": "com.amazonaws.bedrockagent#StepType", "traits": { - "smithy.api#enumValue": "DESCENDING" + "smithy.api#documentation": "

When the service applies the transformation.

", + "smithy.api#required": {} } } - } - }, - "com.amazonaws.bedrockagent#StartIngestionJob": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#StartIngestionJobRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#StartIngestionJobResponse" }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#ConflictException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], "traits": { - "smithy.api#documentation": "

Begins an ingestion job, in which a data source is added to a knowledge base.

", - "smithy.api#http": { - "code": 202, - "method": "PUT", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/" - }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "console" - ] + "smithy.api#documentation": "

A custom processing step for documents moving through a data source ingestion pipeline. To process documents after they have been converted into chunks, set the step to apply to POST_CHUNKING.

" } }, - "com.amazonaws.bedrockagent#StartIngestionJobRequest": { + "com.amazonaws.bedrockagent#TransformationFunction": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which to add the data source.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", + "transformationLambdaConfiguration": { + "target": "com.amazonaws.bedrockagent#TransformationLambdaConfiguration", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source to ingest.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The Lambda function.

", "smithy.api#required": {} } - }, - "clientToken": { - "target": "com.amazonaws.bedrockagent#ClientToken", - "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", - "smithy.api#idempotencyToken": {} - } - }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", - "traits": { - "smithy.api#documentation": "

A description of the ingestion job.

" - } } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

A Lambda function that processes documents.

" } }, - "com.amazonaws.bedrockagent#StartIngestionJobResponse": { + "com.amazonaws.bedrockagent#TransformationLambdaConfiguration": { "type": "structure", "members": { - "ingestionJob": { - "target": "com.amazonaws.bedrockagent#IngestionJob", + "lambdaArn": { + "target": "com.amazonaws.bedrockagent#LambdaArn", "traits": { - "smithy.api#documentation": "

An object containing information about the ingestion job.

", + "smithy.api#documentation": "

The function's ARN identifier.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

A Lambda function that processes documents.

" } }, - "com.amazonaws.bedrockagent#StopSequences": { + "com.amazonaws.bedrockagent#Transformations": { "type": "list", "member": { - "target": "smithy.api#String" + "target": "com.amazonaws.bedrockagent#Transformation" }, "traits": { "smithy.api#length": { - "min": 0, - "max": 4 + "min": 1, + "max": 1 } } }, - "com.amazonaws.bedrockagent#StorageConfiguration": { - "type": "structure", + "com.amazonaws.bedrockagent#Type": { + "type": "enum", "members": { - "type": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseStorageType", - "traits": { - "smithy.api#documentation": "

The vector store service in which the knowledge base is stored.

", - "smithy.api#required": {} - } - }, - "opensearchServerlessConfiguration": { - "target": "com.amazonaws.bedrockagent#OpenSearchServerlessConfiguration", - "traits": { - "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.

" - } - }, - "pineconeConfiguration": { - "target": "com.amazonaws.bedrockagent#PineconeConfiguration", + "STRING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Pinecone.

" + "smithy.api#enumValue": "string" } }, - "redisEnterpriseCloudConfiguration": { - "target": "com.amazonaws.bedrockagent#RedisEnterpriseCloudConfiguration", + "NUMBER": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in Redis Enterprise Cloud.

" + "smithy.api#enumValue": "number" } }, - "rdsConfiguration": { - "target": "com.amazonaws.bedrockagent#RdsConfiguration", + "INTEGER": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see Create a vector index in Amazon RDS.

" + "smithy.api#enumValue": "integer" } }, - "mongoDbAtlasConfiguration": { - "target": "com.amazonaws.bedrockagent#MongoDbAtlasConfiguration", + "BOOLEAN": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains the storage configuration of the knowledge base in MongoDB Atlas.

" + "smithy.api#enumValue": "boolean" } - } - }, - "traits": { - "smithy.api#documentation": "

Contains the storage configuration of the knowledge base.

" - } - }, - "com.amazonaws.bedrockagent#TagKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 }, - "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+@-]*$" - } - }, - "com.amazonaws.bedrockagent#TagKeyList": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagent#TagKey" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 200 + "ARRAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "array" + } } } }, - "com.amazonaws.bedrockagent#TagResource": { + "com.amazonaws.bedrockagent#UntagResource": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#TagResourceRequest" + "target": "com.amazonaws.bedrockagent#UntagResourceRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#TagResourceResponse" + "target": "com.amazonaws.bedrockagent#UntagResourceResponse" }, "errors": [ { @@ -7131,9 +12075,6 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -7142,29 +12083,31 @@ } ], "traits": { - "smithy.api#documentation": "

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Remove tags from a resource.

", "smithy.api#http": { "code": 200, - "method": "POST", + "method": "DELETE", "uri": "/tags/{resourceArn}" - } + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.bedrockagent#TagResourceRequest": { + "com.amazonaws.bedrockagent#UntagResourceRequest": { "type": "structure", "members": { "resourceArn": { "target": "com.amazonaws.bedrockagent#TaggableResourcesArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to tag.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource from which to remove tags.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "tags": { - "target": "com.amazonaws.bedrockagent#TagsMap", + "tagKeys": { + "target": "com.amazonaws.bedrockagent#TagKeyList", "traits": { - "smithy.api#documentation": "

An object containing key-value pairs that define the tags to attach to the resource.

", + "smithy.api#documentation": "

A list of keys of the tags to remove from the resource.

", + "smithy.api#httpQuery": "tagKeys", "smithy.api#required": {} } } @@ -7173,204 +12116,198 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#TagResourceResponse": { + "com.amazonaws.bedrockagent#UntagResourceResponse": { "type": "structure", "members": {}, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#TagValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 256 + "com.amazonaws.bedrockagent#UpdateAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#UpdateAgentRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#UpdateAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" }, - "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+@-]*$" - } - }, - "com.amazonaws.bedrockagent#TaggableResourcesArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 20, - "max": 1011 + { + "target": "com.amazonaws.bedrockagent#ConflictException" }, - "smithy.api#pattern": "(^arn:aws:bedrock:[a-zA-Z0-9-]+:/d{12}:(agent|agent-alias|knowledge-base)/[A-Z0-9]{10}(?:/[A-Z0-9]{10})?$)" - } - }, - "com.amazonaws.bedrockagent#TaggingResource": { - "type": "resource", - "operations": [ { - "target": "com.amazonaws.bedrockagent#ListTagsForResource" + "target": "com.amazonaws.bedrockagent#InternalServerException" }, { - "target": "com.amazonaws.bedrockagent#TagResource" + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, { - "target": "com.amazonaws.bedrockagent#UntagResource" + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" } - ] - }, - "com.amazonaws.bedrockagent#TagsMap": { - "type": "map", - "key": { - "target": "com.amazonaws.bedrockagent#TagKey" - }, - "value": { - "target": "com.amazonaws.bedrockagent#TagValue" - } - }, - "com.amazonaws.bedrockagent#Temperature": { - "type": "float", + ], "traits": { - "smithy.api#range": { - "min": 0, - "max": 1 - } + "smithy.api#documentation": "

Updates the configuration of an agent.

", + "smithy.api#http": { + "code": 202, + "method": "PUT", + "uri": "/agents/{agentId}/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] } }, - "com.amazonaws.bedrockagent#ThrottlingException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.bedrockagent#NonBlankString" - } + "com.amazonaws.bedrockagent#UpdateAgentActionGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#UpdateAgentActionGroupRequest" }, - "traits": { - "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.bedrockagent#TopK": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0, - "max": 500 + "output": { + "target": "com.amazonaws.bedrockagent#UpdateAgentActionGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" } - } - }, - "com.amazonaws.bedrockagent#TopP": { - "type": "float", + ], "traits": { - "smithy.api#range": { - "min": 0, - "max": 1 - } + "smithy.api#documentation": "

Updates the configuration for an action group for an agent.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] } }, - "com.amazonaws.bedrockagent#Type": { - "type": "enum", + "com.amazonaws.bedrockagent#UpdateAgentActionGroupRequest": { + "type": "structure", "members": { - "STRING": { - "target": "smithy.api#Unit", + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#enumValue": "string" + "smithy.api#documentation": "

The unique identifier of the agent for which to update the action group.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "NUMBER": { - "target": "smithy.api#Unit", + "agentVersion": { + "target": "com.amazonaws.bedrockagent#DraftVersion", "traits": { - "smithy.api#enumValue": "number" + "smithy.api#documentation": "

The unique identifier of the agent version for which to update the action group.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "INTEGER": { - "target": "smithy.api#Unit", + "actionGroupId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the action group.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "actionGroupName": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

Specifies a new name for the action group.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

Specifies a new description for the action group.

" + } + }, + "parentActionGroupSignature": { + "target": "com.amazonaws.bedrockagent#ActionGroupSignature", + "traits": { + "smithy.api#documentation": "

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" + } + }, + "actionGroupExecutor": { + "target": "com.amazonaws.bedrockagent#ActionGroupExecutor", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.

" + } + }, + "actionGroupState": { + "target": "com.amazonaws.bedrockagent#ActionGroupState", "traits": { - "smithy.api#enumValue": "integer" + "smithy.api#documentation": "

Specifies whether the action group is available for the agent to invoke or not when sending an InvokeAgent request.

" } }, - "BOOLEAN": { - "target": "smithy.api#Unit", + "apiSchema": { + "target": "com.amazonaws.bedrockagent#APISchema", "traits": { - "smithy.api#enumValue": "boolean" + "smithy.api#documentation": "

Contains either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see Action group OpenAPI schemas.

" } }, - "ARRAY": { - "target": "smithy.api#Unit", + "functionSchema": { + "target": "com.amazonaws.bedrockagent#FunctionSchema", "traits": { - "smithy.api#enumValue": "array" + "smithy.api#documentation": "

Contains details about the function schema for the action group or the JSON or YAML-formatted payload defining the schema.

" } } - } - }, - "com.amazonaws.bedrockagent#UntagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.bedrockagent#UntagResourceRequest" - }, - "output": { - "target": "com.amazonaws.bedrockagent#UntagResourceResponse" }, - "errors": [ - { - "target": "com.amazonaws.bedrockagent#AccessDeniedException" - }, - { - "target": "com.amazonaws.bedrockagent#InternalServerException" - }, - { - "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.bedrockagent#ThrottlingException" - }, - { - "target": "com.amazonaws.bedrockagent#ValidationException" - } - ], "traits": { - "smithy.api#documentation": "

Remove tags from a resource.

", - "smithy.api#http": { - "code": 200, - "method": "DELETE", - "uri": "/tags/{resourceArn}" - }, - "smithy.api#idempotent": {} + "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#UntagResourceRequest": { + "com.amazonaws.bedrockagent#UpdateAgentActionGroupResponse": { "type": "structure", "members": { - "resourceArn": { - "target": "com.amazonaws.bedrockagent#TaggableResourcesArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource from which to remove tags.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tagKeys": { - "target": "com.amazonaws.bedrockagent#TagKeyList", + "agentActionGroup": { + "target": "com.amazonaws.bedrockagent#AgentActionGroup", "traits": { - "smithy.api#documentation": "

A list of keys of the tags to remove from the resource.

", - "smithy.api#httpQuery": "tagKeys", + "smithy.api#documentation": "

Contains details about the action group that was updated.

", "smithy.api#required": {} } } }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#UntagResourceResponse": { - "type": "structure", - "members": {}, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateAgent": { + "com.amazonaws.bedrockagent#UpdateAgentAlias": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateAgentRequest" + "target": "com.amazonaws.bedrockagent#UpdateAgentAliasRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateAgentResponse" + "target": "com.amazonaws.bedrockagent#UpdateAgentAliasResponse" }, "errors": [ { @@ -7396,11 +12333,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the configuration of an agent.

", + "smithy.api#documentation": "

Updates configurations for an alias of an agent.

", "smithy.api#http": { "code": 202, "method": "PUT", - "uri": "/agents/{agentId}/" + "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7408,13 +12345,71 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateAgentActionGroup": { + "com.amazonaws.bedrockagent#UpdateAgentAliasRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentAliasId": { + "target": "com.amazonaws.bedrockagent#AgentAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentAliasName": { + "target": "com.amazonaws.bedrockagent#Name", + "traits": { + "smithy.api#documentation": "

Specifies a new name for the alias.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

Specifies a new description for the alias.

" + } + }, + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#AgentAliasRoutingConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about the routing configuration of the alias.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#UpdateAgentAliasResponse": { + "type": "structure", + "members": { + "agentAlias": { + "target": "com.amazonaws.bedrockagent#AgentAlias", + "traits": { + "smithy.api#documentation": "

Contains details about the alias that was updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBase": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateAgentActionGroupRequest" + "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateAgentActionGroupResponse" + "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseResponse" }, "errors": [ { @@ -7429,9 +12424,6 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -7440,11 +12432,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the configuration for an action group for an agent.

", + "smithy.api#documentation": "

Updates the configuration for a knowledge base that has been associated with an agent.

", "smithy.api#http": { "code": 200, "method": "PUT", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/actiongroups/{actionGroupId}/" + "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7452,13 +12444,13 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateAgentActionGroupRequest": { + "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseRequest": { "type": "structure", "members": { "agentId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent for which to update the action group.

", + "smithy.api#documentation": "

The unique identifier of the agent associated with the knowledge base that you want to update.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7466,60 +12458,124 @@ "agentVersion": { "target": "com.amazonaws.bedrockagent#DraftVersion", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent version for which to update the action group.

", + "smithy.api#documentation": "

The version of the agent associated with the knowledge base that you want to update.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "actionGroupId": { + "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the action group.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base that has been associated with an agent.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "actionGroupName": { + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

Specifies a new description for the knowledge base associated with an agent.

" + } + }, + "knowledgeBaseState": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseState", + "traits": { + "smithy.api#documentation": "

Specifies whether the agent uses the knowledge base or not when sending an InvokeAgent request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseResponse": { + "type": "structure", + "members": { + "agentKnowledgeBase": { + "target": "com.amazonaws.bedrockagent#AgentKnowledgeBase", + "traits": { + "smithy.api#documentation": "

Contains details about the knowledge base that has been associated with an agent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockagent#UpdateAgentRequest": { + "type": "structure", + "members": { + "agentId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "agentName": { "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

Specifies a new name for the action group.

", + "smithy.api#documentation": "

Specifies a new name for the agent.

", + "smithy.api#required": {} + } + }, + "instruction": { + "target": "com.amazonaws.bedrockagent#Instruction", + "traits": { + "smithy.api#documentation": "

Specifies new instructions that tell the agent what it should do and how it should interact with users.

" + } + }, + "foundationModel": { + "target": "com.amazonaws.bedrockagent#ModelIdentifier", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specifies a new foundation model to be used for orchestration by the agent.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

Specifies a new name for the action group.

" + "smithy.api#documentation": "

Specifies a new description of the agent.

" } }, - "parentActionGroupSignature": { - "target": "com.amazonaws.bedrockagent#ActionGroupSignature", + "idleSessionTTLInSeconds": { + "target": "com.amazonaws.bedrockagent#SessionTTL", "traits": { - "smithy.api#documentation": "

To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an Observation reprompting the user for more information.

" + "smithy.api#documentation": "

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

\n

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" } }, - "actionGroupExecutor": { - "target": "com.amazonaws.bedrockagent#ActionGroupExecutor", + "agentResourceRoleArn": { + "target": "com.amazonaws.bedrockagent#AgentRoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the agent.

", + "smithy.api#required": {} } }, - "actionGroupState": { - "target": "com.amazonaws.bedrockagent#ActionGroupState", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

Specifies whether the action group is available for the agent to invoke or not when sending an InvokeAgent request.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key with which to encrypt the agent.

" } }, - "apiSchema": { - "target": "com.amazonaws.bedrockagent#APISchema", + "promptOverrideConfiguration": { + "target": "com.amazonaws.bedrockagent#PromptOverrideConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" + } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

Contains either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see Action group OpenAPI schemas.

" + "smithy.api#documentation": "

The unique Guardrail configuration assigned to the agent when it is updated.

" } }, - "functionSchema": { - "target": "com.amazonaws.bedrockagent#FunctionSchema", + "memoryConfiguration": { + "target": "com.amazonaws.bedrockagent#MemoryConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about the function schema for the action group or the JSON or YAML-formatted payload defining the schema.

" + "smithy.api#documentation": "

Specifies the new memory configuration for the agent.

" } } }, @@ -7527,13 +12583,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentActionGroupResponse": { + "com.amazonaws.bedrockagent#UpdateAgentResponse": { "type": "structure", "members": { - "agentActionGroup": { - "target": "com.amazonaws.bedrockagent#AgentActionGroup", + "agent": { + "target": "com.amazonaws.bedrockagent#Agent", "traits": { - "smithy.api#documentation": "

Contains details about the action group that was updated.

", + "smithy.api#documentation": "

Contains details about the agent that was updated.

", "smithy.api#required": {} } } @@ -7542,13 +12598,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentAlias": { + "com.amazonaws.bedrockagent#UpdateDataSource": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateAgentAliasRequest" + "target": "com.amazonaws.bedrockagent#UpdateDataSourceRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateAgentAliasResponse" + "target": "com.amazonaws.bedrockagent#UpdateDataSourceResponse" }, "errors": [ { @@ -7563,9 +12619,6 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -7574,11 +12627,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates configurations for an alias of an agent.

", + "smithy.api#documentation": "

Updates the configurations for a data source connector.


You can't change the chunkingConfiguration after you create the data source connector. Specify the existing chunkingConfiguration.

", "smithy.api#http": { - "code": 202, + "code": 200, "method": "PUT", - "uri": "/agents/{agentId}/agentaliases/{agentAliasId}/" + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7586,42 +12639,61 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateAgentAliasRequest": { + "com.amazonaws.bedrockagent#UpdateDataSourceRequest": { "type": "structure", "members": { - "agentId": { + "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data source.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "agentAliasId": { - "target": "com.amazonaws.bedrockagent#AgentAliasId", + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the alias.

", + "smithy.api#documentation": "

The unique identifier of the data source.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "agentAliasName": { + "name": { "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

Specifies a new name for the alias.

", + "smithy.api#documentation": "

Specifies a new name for the data source.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

Specifies a new description for the alias.

" + "smithy.api#documentation": "

Specifies a new description for the data source.

" } }, - "routingConfiguration": { - "target": "com.amazonaws.bedrockagent#AgentAliasRoutingConfiguration", + "dataSourceConfiguration": { + "target": "com.amazonaws.bedrockagent#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about the routing configuration of the alias.

" + "smithy.api#documentation": "

The connection configuration for the data source that you want to update.

", + "smithy.api#required": {} + } + }, + "dataDeletionPolicy": { + "target": "com.amazonaws.bedrockagent#DataDeletionPolicy", + "traits": { + "smithy.api#documentation": "

The data deletion policy for the data source that you want to update.

" + } + }, + "serverSideEncryptionConfiguration": { + "target": "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about server-side encryption of the data source.

" + } + }, + "vectorIngestionConfiguration": { + "target": "com.amazonaws.bedrockagent#VectorIngestionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about how to ingest the documents in the data source.

" } } }, @@ -7629,13 +12701,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentAliasResponse": { + "com.amazonaws.bedrockagent#UpdateDataSourceResponse": { "type": "structure", "members": { - "agentAlias": { - "target": "com.amazonaws.bedrockagent#AgentAlias", + "dataSource": { + "target": "com.amazonaws.bedrockagent#DataSource", "traits": { - "smithy.api#documentation": "

Contains details about the alias that was updated.

", + "smithy.api#documentation": "

Contains details about the data source.

", "smithy.api#required": {} } } @@ -7644,13 +12716,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBase": { + "com.amazonaws.bedrockagent#UpdateFlow": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#UpdateFlowRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#UpdateFlowResponse" }, "errors": [ { @@ -7665,6 +12737,9 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -7673,11 +12748,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the configuration for a knowledge base that has been associated with an agent.

", + "smithy.api#documentation": "

Modifies a flow. Include both fields that you want to keep and fields that you want to change. For more information, see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 200, "method": "PUT", - "uri": "/agents/{agentId}/agentversions/{agentVersion}/knowledgebases/{knowledgeBaseId}/" + "uri": "/flows/{flowIdentifier}/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7685,43 +12760,87 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#UpdateFlowAlias": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#UpdateFlowAliasRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#UpdateFlowAliasResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the alias of a flow. Include both fields that you want to keep and ones that you want to change. For more information, see Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/flows/{flowIdentifier}/aliases/{aliasIdentifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#UpdateFlowAliasRequest": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent associated with the knowledge base that you want to update.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The name of the flow alias.

", "smithy.api#required": {} } }, - "agentVersion": { - "target": "com.amazonaws.bedrockagent#DraftVersion", + "description": { + "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

The version of the agent associated with the knowledge base that you want to update.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

A description for the flow alias.

" } }, - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base that has been associated with an agent.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

Contains information about the version to which to map the alias.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.bedrockagent#Description", + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", "traits": { - "smithy.api#documentation": "

Specifies a new description for the knowledge base associated with an agent.

" + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "knowledgeBaseState": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseState", + "aliasIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowAliasIdentifier", "traits": { - "smithy.api#documentation": "

Specifies whether the agent uses the knowledge base or not when sending an InvokeAgent request.

" + "smithy.api#documentation": "

The unique identifier of the alias.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } } }, @@ -7729,13 +12848,61 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentKnowledgeBaseResponse": { + "com.amazonaws.bedrockagent#UpdateFlowAliasResponse": { "type": "structure", "members": { - "agentKnowledgeBase": { - "target": "com.amazonaws.bedrockagent#AgentKnowledgeBase", + "name": { + "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

Contains details about the knowledge base that has been associated with an agent.

", + "smithy.api#documentation": "

The name of the flow alias.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockagent#Description", + "traits": { + "smithy.api#documentation": "

The description of the flow alias.

" + } + }, + "routingConfiguration": { + "target": "com.amazonaws.bedrockagent#FlowAliasRoutingConfiguration", + "traits": { + "smithy.api#documentation": "

Contains information about the version that the alias is mapped to.

", + "smithy.api#required": {} + } + }, + "flowId": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowAliasId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the alias.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowAliasArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow alias.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow alias was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the flow alias was last updated.

", "smithy.api#required": {} } } @@ -7744,88 +12911,129 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateAgentRequest": { + "com.amazonaws.bedrockagent#UpdateFlowRequest": { "type": "structure", "members": { - "agentId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", "traits": { - "smithy.api#documentation": "

The unique identifier of the agent.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

A name for the flow.

", "smithy.api#required": {} } }, - "agentName": { - "target": "com.amazonaws.bedrockagent#Name", + "description": { + "target": "com.amazonaws.bedrockagent#FlowDescription", "traits": { - "smithy.api#documentation": "

Specifies a new name for the agent.

", + "smithy.api#documentation": "

A description for the flow.

" + } + }, + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", "smithy.api#required": {} } }, - "instruction": { - "target": "com.amazonaws.bedrockagent#Instruction", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

Specifies new instructions that tell the agent what it should do and how it should interact with users.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the flow.

" } }, - "foundationModel": { - "target": "com.amazonaws.bedrockagent#ModelIdentifier", + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies a new foundation model to be used for orchestration by the agent.

", + "smithy.api#documentation": "

A definition of the nodes and the connections between the nodes in the flow.

" + } + }, + "flowIdentifier": { + "target": "com.amazonaws.bedrockagent#FlowIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#UpdateFlowResponse": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockagent#FlowName", + "traits": { + "smithy.api#documentation": "

The name of the flow.

", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.bedrockagent#Description", + "target": "com.amazonaws.bedrockagent#FlowDescription", "traits": { - "smithy.api#documentation": "

Specifies a new description of the agent.

" + "smithy.api#documentation": "

The description of the flow.

" } }, - "idleSessionTTLInSeconds": { - "target": "com.amazonaws.bedrockagent#SessionTTL", + "executionRoleArn": { + "target": "com.amazonaws.bedrockagent#FlowExecutionRoleArn", "traits": { - "smithy.api#documentation": "

The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.

\n

A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see Create a service role for flows in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "smithy.api#required": {} } }, - "agentResourceRoleArn": { - "target": "com.amazonaws.bedrockagent#AgentRoleArn", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the agent.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that the flow was encrypted with.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#FlowId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the flow.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.bedrockagent#FlowArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the flow.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockagent#FlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow. When you submit this request, the status will be NotPrepared. If updating fails, the status becomes Failed.

", "smithy.api#required": {} } }, - "customerEncryptionKeyArn": { - "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key with which to encrypt the agent.

" + "smithy.api#documentation": "

The time at which the flow was created.

", + "smithy.api#required": {} } }, - "promptOverrideConfiguration": { - "target": "com.amazonaws.bedrockagent#PromptOverrideConfiguration", + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" + "smithy.api#documentation": "

The time at which the flow was last updated.

", + "smithy.api#required": {} } }, - "guardrailConfiguration": { - "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "version": { + "target": "com.amazonaws.bedrockagent#DraftVersion", "traits": { - "smithy.api#documentation": "

The unique Guardrail configuration assigned to the agent when it is updated.

" + "smithy.api#documentation": "

The version of the flow. When you update a flow, the version updated is the DRAFT version.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.bedrockagent#UpdateAgentResponse": { - "type": "structure", - "members": { - "agent": { - "target": "com.amazonaws.bedrockagent#Agent", + }, + "definition": { + "target": "com.amazonaws.bedrockagent#FlowDefinition", "traits": { - "smithy.api#documentation": "

Contains details about the agent that was updated.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A definition of the nodes and the connections between nodes in the flow.

" } } }, @@ -7833,13 +13041,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateDataSource": { + "com.amazonaws.bedrockagent#UpdateKnowledgeBase": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateDataSourceRequest" + "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBaseRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateDataSourceResponse" + "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBaseResponse" }, "errors": [ { @@ -7862,11 +13070,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates configurations for a data source.

\n \n

You can't change the chunkingConfiguration after you create the data source. Specify the existing chunkingConfiguration.

\n
", + "smithy.api#documentation": "

Updates the configuration of a knowledge base with the fields that you specify. Because all fields will be overwritten, you must include the same values for fields that you want to keep the same.

\n

You can change the following fields:

  • name
  • description
  • roleArn

You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must specify the same configurations as when you created the knowledge base. You can send a GetKnowledgeBase request and copy the same configurations.

", "smithy.api#http": { - "code": 200, + "code": 202, "method": "PUT", - "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}" + "uri": "/knowledgebases/{knowledgeBaseId}" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7874,21 +13082,13 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateDataSourceRequest": { + "com.amazonaws.bedrockagent#UpdateKnowledgeBaseRequest": { "type": "structure", "members": { "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source belongs.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "dataSourceId": { - "target": "com.amazonaws.bedrockagent#Id", - "traits": { - "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base to update.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7896,39 +13096,35 @@ "name": { "target": "com.amazonaws.bedrockagent#Name", "traits": { - "smithy.api#documentation": "

Specifies a new name for the data source.

", + "smithy.api#documentation": "

Specifies a new name for the knowledge base.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

Specifies a new description for the data source.

" + "smithy.api#documentation": "

Specifies a new description for the knowledge base.

" } }, - "dataSourceConfiguration": { - "target": "com.amazonaws.bedrockagent#DataSourceConfiguration", + "roleArn": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", "traits": { - "smithy.api#documentation": "

Contains details about the storage configuration of the data source.

", + "smithy.api#documentation": "

Specifies a different Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", "smithy.api#required": {} } }, - "dataDeletionPolicy": { - "target": "com.amazonaws.bedrockagent#DataDeletionPolicy", - "traits": { - "smithy.api#documentation": "

The data deletion policy of the updated data source.

" - } - }, - "serverSideEncryptionConfiguration": { - "target": "com.amazonaws.bedrockagent#ServerSideEncryptionConfiguration", + "knowledgeBaseConfiguration": { + "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about server-side encryption of the data source.

" + "smithy.api#documentation": "

Specifies the configuration for the embeddings model used for the knowledge base. You must use the same configuration as when the knowledge base was created.

", + "smithy.api#required": {} } }, - "vectorIngestionConfiguration": { - "target": "com.amazonaws.bedrockagent#VectorIngestionConfiguration", + "storageConfiguration": { + "target": "com.amazonaws.bedrockagent#StorageConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about how to ingest the documents in the data source.

" + "smithy.api#documentation": "

Specifies the configuration for the vector store used for the knowledge base. You must use the same configuration as when the knowledge base was created.

", + "smithy.api#required": {} } } }, @@ -7936,13 +13132,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockagent#UpdateDataSourceResponse": { + "com.amazonaws.bedrockagent#UpdateKnowledgeBaseResponse": { "type": "structure", "members": { - "dataSource": { - "target": "com.amazonaws.bedrockagent#DataSource", + "knowledgeBase": { + "target": "com.amazonaws.bedrockagent#KnowledgeBase", "traits": { - "smithy.api#documentation": "

Contains details about the data source.

", + "smithy.api#documentation": "

Contains details about the knowledge base.

", "smithy.api#required": {} } } @@ -7951,13 +13147,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateKnowledgeBase": { + "com.amazonaws.bedrockagent#UpdatePrompt": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBaseRequest" + "target": "com.amazonaws.bedrockagent#UpdatePromptRequest" }, "output": { - "target": "com.amazonaws.bedrockagent#UpdateKnowledgeBaseResponse" + "target": "com.amazonaws.bedrockagent#UpdatePromptResponse" }, "errors": [ { @@ -7972,6 +13168,9 @@ { "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" }, + { + "target": "com.amazonaws.bedrockagent#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.bedrockagent#ThrottlingException" }, @@ -7980,11 +13179,11 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the configuration of a knowledge base with the fields that you specify. Because all fields will be overwritten, you must include the same values for fields that you want to keep the same.

You can change the following fields:

  • name
  • description
  • roleArn

You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must specify the same configurations as when you created the knowledge base. You can send a GetKnowledgeBase request and copy the same configurations (as sketched below).
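A minimal Soto sketch of that flow, assuming the regenerated SotoBedrockAgent client follows the same async pattern as the other services in this package (shape and member names are read off the model above, not verified against the generated source; the helper name and identifiers are hypothetical):

    import SotoBedrockAgent

    // Hypothetical helper: rename a knowledge base while keeping its immutable configuration.
    func renameKnowledgeBase(_ bedrockAgent: BedrockAgent, id: String, newName: String) async throws {
        // Copy the configurations that can't change from GetKnowledgeBase.
        let existing = try await bedrockAgent.getKnowledgeBase(.init(knowledgeBaseId: id))
        let kb = existing.knowledgeBase
        let request = BedrockAgent.UpdateKnowledgeBaseRequest(
            description: kb.description,                               // resend, or it is overwritten
            knowledgeBaseConfiguration: kb.knowledgeBaseConfiguration, // must match the original
            knowledgeBaseId: id,
            name: newName,
            roleArn: kb.roleArn,
            storageConfiguration: kb.storageConfiguration              // must match the original
        )
        _ = try await bedrockAgent.updateKnowledgeBase(request)
    }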

", + "smithy.api#documentation": "

Modifies a prompt in your prompt library. Include both fields that you want to keep and fields that you want to replace. For more information, see Prompt management in Amazon Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide.

", "smithy.api#http": { - "code": 202, + "code": 200, "method": "PUT", - "uri": "/knowledgebases/{knowledgeBaseId}" + "uri": "/prompts/{promptIdentifier}/" }, "smithy.api#idempotent": {}, "smithy.api#tags": [ @@ -7992,69 +13191,145 @@ ] } }, - "com.amazonaws.bedrockagent#UpdateKnowledgeBaseRequest": { + "com.amazonaws.bedrockagent#UpdatePromptRequest": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.bedrockagent#Id", + "name": { + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to update.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

A name for the prompt.

", "smithy.api#required": {} } }, + "description": { + "target": "com.amazonaws.bedrockagent#PromptDescription", + "traits": { + "smithy.api#documentation": "

A description for the prompt.

" + } + }, + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" + } + }, + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + } + }, + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } + }, + "promptIdentifier": { + "target": "com.amazonaws.bedrockagent#PromptIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.
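A sketch of the corresponding Soto call, under the same assumptions as the knowledge-base example above (generated names inferred from this model; the identifier and strings are placeholders). Because the update replaces the prompt, resend every field you want to keep:

    import SotoBedrockAgent

    func renamePrompt(_ bedrockAgent: BedrockAgent, promptId: String) async throws {
        let request = BedrockAgent.UpdatePromptRequest(
            description: "Summarizes support tickets",
            name: "ticket-summarizer",     // required even if unchanged
            promptIdentifier: promptId
            // defaultVariant/variants omitted for brevity; resend them to keep them
        )
        let response = try await bedrockAgent.updatePrompt(request)
        print(response.version)            // updates always land on the DRAFT version
    }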

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#UpdatePromptResponse": { + "type": "structure", + "members": { "name": { - "target": "com.amazonaws.bedrockagent#Name", + "target": "com.amazonaws.bedrockagent#PromptName", "traits": { - "smithy.api#documentation": "

Specifies a new name for the knowledge base.

", + "smithy.api#documentation": "

The name of the prompt.

", "smithy.api#required": {} } }, "description": { - "target": "com.amazonaws.bedrockagent#Description", + "target": "com.amazonaws.bedrockagent#PromptDescription", "traits": { - "smithy.api#documentation": "

Specifies a new description for the knowledge base.

" + "smithy.api#documentation": "

The description of the prompt.

" } }, - "roleArn": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseRoleArn", + "customerEncryptionKeyArn": { + "target": "com.amazonaws.bedrockagent#KmsKeyArn", "traits": { - "smithy.api#documentation": "

Specifies a different Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to encrypt the prompt.

" + } + }, + "defaultVariant": { + "target": "com.amazonaws.bedrockagent#PromptVariantName", + "traits": { + "smithy.api#documentation": "

The name of the default variant for the prompt. This value must match the name field in the relevant PromptVariant object.

" + } + }, + "variants": { + "target": "com.amazonaws.bedrockagent#PromptVariantList", + "traits": { + "smithy.api#documentation": "

A list of objects, each containing details about a variant of the prompt.

" + } + }, + "id": { + "target": "com.amazonaws.bedrockagent#PromptId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the prompt.

", "smithy.api#required": {} } }, - "knowledgeBaseConfiguration": { - "target": "com.amazonaws.bedrockagent#KnowledgeBaseConfiguration", + "arn": { + "target": "com.amazonaws.bedrockagent#PromptArn", "traits": { - "smithy.api#documentation": "

Specifies the configuration for the embeddings model used for the knowledge base. You must use the same configuration as when the knowledge base was created.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the prompt.

", "smithy.api#required": {} } }, - "storageConfiguration": { - "target": "com.amazonaws.bedrockagent#StorageConfiguration", + "version": { + "target": "com.amazonaws.bedrockagent#Version", "traits": { - "smithy.api#documentation": "

Specifies the configuration for the vector store used for the knowledge base. You must use the same configuration as when the knowledge base was created.

", + "smithy.api#documentation": "

The version of the prompt. When you update a prompt, the version updated is the DRAFT version.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the prompt was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.bedrockagent#DateTimestamp", + "traits": { + "smithy.api#documentation": "

The time at which the prompt was last updated.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#input": {} + "smithy.api#output": {} } }, - "com.amazonaws.bedrockagent#UpdateKnowledgeBaseResponse": { + "com.amazonaws.bedrockagent#Url": { + "type": "string", + "traits": { + "smithy.api#pattern": "^https?://[A-Za-z0-9][^\\s]*$" + } + }, + "com.amazonaws.bedrockagent#UrlConfiguration": { "type": "structure", "members": { - "knowledgeBase": { - "target": "com.amazonaws.bedrockagent#KnowledgeBase", + "seedUrls": { + "target": "com.amazonaws.bedrockagent#SeedUrls", "traits": { - "smithy.api#documentation": "

Contains details about the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

One or more seed or starting point URLs.

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

The configuration of web URLs that you want to crawl. \n You should be authorized to crawl the URLs.

" } }, "com.amazonaws.bedrockagent#ValidationException": { @@ -8112,6 +13387,18 @@ "traits": { "smithy.api#documentation": "

Details about how to chunk the documents in the data source. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.

" } + }, + "customTransformationConfiguration": { + "target": "com.amazonaws.bedrockagent#CustomTransformationConfiguration", + "traits": { + "smithy.api#documentation": "

A custom document transformer for parsed data source documents.

" + } + }, + "parsingConfiguration": { + "target": "com.amazonaws.bedrockagent#ParsingConfiguration", + "traits": { + "smithy.api#documentation": "

A custom parser for data source documents.

" + } } }, "traits": { @@ -8162,6 +13449,109 @@ "target": "com.amazonaws.bedrockagent#ListAgentVersions" } ] + }, + "com.amazonaws.bedrockagent#WebCrawlerConfiguration": { + "type": "structure", + "members": { + "crawlerLimits": { + "target": "com.amazonaws.bedrockagent#WebCrawlerLimits", + "traits": { + "smithy.api#documentation": "

The configuration of crawl limits for the web URLs.

" + } + }, + "inclusionFilters": { + "target": "com.amazonaws.bedrockagent#FilterList", + "traits": { + "smithy.api#documentation": "

A list of one or more inclusion regular expression patterns to include \n certain URLs. If you specify an inclusion and exclusion filter/pattern \n and both match a URL, the exclusion filter takes precedence and the web \n content of the URL isn’t crawled.

" + } + }, + "exclusionFilters": { + "target": "com.amazonaws.bedrockagent#FilterList", + "traits": { + "smithy.api#documentation": "

A list of one or more exclusion regular expression patterns to exclude \n certain URLs. If you specify an inclusion and exclusion filter/pattern \n and both match a URL, the exclusion filter takes precedence and the web \n content of the URL isn’t crawled.

" + } + }, + "scope": { + "target": "com.amazonaws.bedrockagent#WebScopeType", + "traits": { + "smithy.api#documentation": "

The scope of what is crawled for your URLs.

\n

You can choose to crawl only web pages that belong to the same host or primary \n domain. For example, only web pages that contain the seed URL \n \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. \n You can choose to include sub domains in addition to the host or primary domain. \n For example, web pages that contain \"aws.amazon.com\" can also include sub domain \n \"docs.aws.amazon.com\".

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of web URLs that you want to crawl. \n You should be authorized to crawl the URLs.

" + } + }, + "com.amazonaws.bedrockagent#WebCrawlerLimits": { + "type": "structure", + "members": { + "rateLimit": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The max rate at which pages are crawled, up to 300 per minute per host.

", + "smithy.api#range": { + "min": 1, + "max": 300 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The rate limits for the URLs that you want to crawl. \n You should be authorized to crawl the URLs.

" + } + }, + "com.amazonaws.bedrockagent#WebDataSourceConfiguration": { + "type": "structure", + "members": { + "sourceConfiguration": { + "target": "com.amazonaws.bedrockagent#WebSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The source configuration details for the web data source.

", + "smithy.api#required": {} + } + }, + "crawlerConfiguration": { + "target": "com.amazonaws.bedrockagent#WebCrawlerConfiguration", + "traits": { + "smithy.api#documentation": "

The Web Crawler configuration details for the web data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details for the web data source.

" + } + }, + "com.amazonaws.bedrockagent#WebScopeType": { + "type": "enum", + "members": { + "HOST_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HOST_ONLY" + } + }, + "SUBDOMAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBDOMAINS" + } + } + } + }, + "com.amazonaws.bedrockagent#WebSourceConfiguration": { + "type": "structure", + "members": { + "urlConfiguration": { + "target": "com.amazonaws.bedrockagent#UrlConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the URL/URLs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the URL/URLs for the web content that you want \n to crawl. You should be authorized to crawl the URLs.
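Putting the web-crawler shapes above together, a web data source could be configured roughly as follows (a sketch: the SeedUrl member name url is an assumption based on the generated code's conventions, and the URLs and filter patterns are placeholders):

    import SotoBedrockAgent

    // Crawl only pages you are authorized to crawl.
    let webConfiguration = BedrockAgent.WebDataSourceConfiguration(
        crawlerConfiguration: .init(
            crawlerLimits: .init(rateLimit: 60),     // max 300 pages per minute per host
            exclusionFilters: [".*/private/.*"],     // exclusion wins when both filters match
            inclusionFilters: [".*/docs/.*"],
            scope: .hostOnly                         // or .subdomains
        ),
        sourceConfiguration: .init(
            urlConfiguration: .init(seedUrls: [
                .init(url: "https://docs.aws.amazon.com/bedrock/latest/userguide/")
            ])
        )
    )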

" + } } } } \ No newline at end of file diff --git a/models/bedrock-runtime.json b/models/bedrock-runtime.json index 9c3ca78f33..e6963c7808 100644 --- a/models/bedrock-runtime.json +++ b/models/bedrock-runtime.json @@ -35,6 +35,9 @@ "type": "service", "version": "2023-09-30", "resources": [ + { + "target": "com.amazonaws.bedrockruntime#GuardrailResource" + }, { "target": "com.amazonaws.bedrockruntime#InferenceResource" } @@ -724,6 +727,117 @@ "smithy.api#documentation": "

The model must request at least one tool (no text is generated). For example, {\"any\" : {}}.

" } }, + "com.amazonaws.bedrockruntime#ApplyGuardrail": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockruntime#ApplyGuardrailRequest" + }, + "output": { + "target": "com.amazonaws.bedrockruntime#ApplyGuardrailResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

The action to apply a guardrail.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/guardrail/{guardrailIdentifier}/version/{guardrailVersion}/apply" + } + } + }, + "com.amazonaws.bedrockruntime#ApplyGuardrailRequest": { + "type": "structure", + "members": { + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockruntime#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The guardrail identifier used in the request to apply the guardrail.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockruntime#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The guardrail version used in the request to apply the guardrail.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentSource", + "traits": { + "smithy.api#documentation": "

The source of data used in the request to apply the guardrail.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentBlockList", + "traits": { + "smithy.api#documentation": "

The content details used in the request to apply the guardrail.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockruntime#ApplyGuardrailResponse": { + "type": "structure", + "members": { + "usage": { + "target": "com.amazonaws.bedrockruntime#GuardrailUsage", + "traits": { + "smithy.api#documentation": "

The usage details in the response from the guardrail.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailAction", + "traits": { + "smithy.api#documentation": "

The action taken in the response from the guardrail.

", + "smithy.api#required": {} + } + }, + "outputs": { + "target": "com.amazonaws.bedrockruntime#GuardrailOutputContentList", + "traits": { + "smithy.api#documentation": "

The output details in the response from the guardrail.

", + "smithy.api#required": {} + } + }, + "assessments": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessmentList", + "traits": { + "smithy.api#documentation": "

The assessment details in the response from the guardrail.
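Taken together, the request and response shapes above suggest a call like the following once this model is regenerated into SotoBedrockRuntime (union cases and enum names follow Soto's usual code-generation conventions and are assumptions; the guardrail ID is a placeholder):

    import SotoBedrockRuntime

    // Hypothetical helper: returns false if the guardrail blocks the input text.
    func checkInput(_ runtime: BedrockRuntime, userText: String) async throws -> Bool {
        let request = BedrockRuntime.ApplyGuardrailRequest(
            content: [.text(.init(text: userText))],
            guardrailIdentifier: "gr-1234567890ab",
            guardrailVersion: "DRAFT",
            source: .input
        )
        let response = try await runtime.applyGuardrail(request)
        // `outputs` carries the replacement text when the guardrail intervenes.
        if response.action == .guardrailIntervened {
            print(response.outputs.compactMap(\.text).joined(separator: " "))
            return false
        }
        return true
    }

Running the same check with source: .output would assess a model response instead of a user input.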

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrockruntime#AutoToolChoice": { "type": "structure", "members": {}, @@ -764,7 +878,7 @@ "toolUse": { "target": "com.amazonaws.bedrockruntime#ToolUseBlock", "traits": { - "smithy.api#documentation": "

Information about a tool use request from a model.

" + "smithy.api#documentation": "

Information about a tool use request from a model.

" } }, "toolResult": { @@ -781,7 +895,7 @@ } }, "traits": { - "smithy.api#documentation": "

A block of content for a message that you pass to, or receive from, a model with the Converse API (Converse and ConverseStream).

" + "smithy.api#documentation": "

A block of content for a message that you pass to, or receive from, a model with the Converse or ConverseStream API operations.

" } }, "com.amazonaws.bedrockruntime#ContentBlockDelta": { @@ -1155,7 +1269,7 @@ "trace": { "target": "com.amazonaws.bedrockruntime#ConverseStreamTrace", "traits": { - "smithy.api#documentation": "

The trace object in the response from ConverseStream that contains information about the guardrail behavior.

" + "smithy.api#documentation": "

The trace object in the response from ConverseStream that contains information about the guardrail behavior.

" } } }, @@ -1335,7 +1449,7 @@ } }, "traits": { - "smithy.api#documentation": "

The trace object in a response from ConverseStream. Currently, you can only trace guardrails.

" + "smithy.api#documentation": "

The trace object in a response from ConverseStream. Currently, you can only trace guardrails.

" } }, "com.amazonaws.bedrockruntime#ConverseTrace": { @@ -1349,7 +1463,7 @@ } }, "traits": { - "smithy.api#documentation": "

The trace object in a response from Converse. Currently, you can only trace guardrails.

" + "smithy.api#documentation": "

The trace object in a response from Converse. Currently, you can only trace guardrails.

" } }, "com.amazonaws.bedrockruntime#DocumentBlock": { @@ -1365,7 +1479,7 @@ "name": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

A name for the document.

", + "smithy.api#documentation": "

A name for the document. The name can only contain the following characters:

  • Alphanumeric characters
  • Whitespace characters (no more than one in a row)
  • Hyphens
  • Parentheses
  • Square brackets

This field is vulnerable to prompt injections, because the model might inadvertently interpret it as instructions. Therefore, we recommend that you specify a neutral name.
", "smithy.api#length": { "min": 1, "max": 200 @@ -1382,7 +1496,7 @@ } }, "traits": { - "smithy.api#documentation": "

A document to include in a message when sending a Converse or ConverseStream request. You can include up to 5 documents in a request. The maximum document size is 50 MB.

" + "smithy.api#documentation": "

A document to include in a message.

" } }, "com.amazonaws.bedrockruntime#DocumentFormat": { @@ -1450,7 +1564,7 @@ "bytes": { "target": "smithy.api#Blob", "traits": { - "smithy.api#documentation": "

A base64-encoded string of a UTF-8 encoded file, that is the document to include in the message.

", + "smithy.api#documentation": "

The raw bytes for the document. If you use an Amazon Web Services SDK, you don't need to encode the bytes in base64.

", "smithy.api#length": { "min": 1 } @@ -1458,7 +1572,24 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the content of the document included in a message when sending a Converse or ConverseStream request or in the response.

" + "smithy.api#documentation": "

Contains the content of a document.
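For example, a document block could be assembled like this (a sketch: the Soto blob representation AWSBase64Data and the generated shape names are assumptions based on the package's conventions):

    import Foundation
    import SotoBedrockRuntime

    // Build a document block from a local PDF; .data(_:) takes the raw bytes directly.
    func makeDocumentBlock(from url: URL) throws -> BedrockRuntime.DocumentBlock {
        let bytes = try Data(contentsOf: url)
        return BedrockRuntime.DocumentBlock(
            format: .pdf,
            name: "quarterly report",   // keep the name neutral to avoid prompt injection
            source: .bytes(.data(bytes))
        )
    }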

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailAction": { + "type": "enum", + "members": { + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "GUARDRAIL_INTERVENED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GUARDRAIL_INTERVENED" + } + } } }, "com.amazonaws.bedrockruntime#GuardrailAssessment": { @@ -1487,6 +1618,12 @@ "traits": { "smithy.api#documentation": "

The sensitive information policy.

" } + }, + "contextualGroundingPolicy": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyAssessment", + "traits": { + "smithy.api#documentation": "

The contextual grounding policy used for the guardrail assessment.

" + } } }, "traits": { @@ -1543,7 +1680,27 @@ } }, "traits": { - "smithy.api#documentation": "

Configuration information for a guardrail that you use with the Converse action.

" + "smithy.api#documentation": "

Configuration information for a guardrail that you use with the Converse operation.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentBlock": { + "type": "union", + "members": { + "text": { + "target": "com.amazonaws.bedrockruntime#GuardrailTextBlock", + "traits": { + "smithy.api#documentation": "

Text within content block to be evaluated by the guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The content block to be evaluated by the guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentBlockList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentBlock" } }, "com.amazonaws.bedrockruntime#GuardrailContentFilter": { @@ -1677,6 +1834,156 @@ "smithy.api#documentation": "

An assessment of a content policy for a guardrail.

" } }, + "com.amazonaws.bedrockruntime#GuardrailContentPolicyUnitsProcessed": { + "type": "integer" + }, + "com.amazonaws.bedrockruntime#GuardrailContentQualifier": { + "type": "enum", + "members": { + "GROUNDING_SOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "grounding_source" + } + }, + "QUERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "query" + } + }, + "GUARD_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "guard_content" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentQualifierList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentQualifier" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentSource": { + "type": "enum", + "members": { + "INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INPUT" + } + }, + "OUTPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OUTPUT" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilter": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilterType", + "traits": { + "smithy.api#documentation": "

The contextual grounding filter type.

", + "smithy.api#required": {} + } + }, + "threshold": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The threshold used by contextual grounding filter to determine whether the content is grounded or not.

", + "smithy.api#range": { + "min": 0, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "score": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The score generated by contextual grounding filter.

", + "smithy.api#range": { + "min": 0, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyAction", + "traits": { + "smithy.api#documentation": "

The action performed by the guardrails contextual grounding filter.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the guardrails contextual grounding filter.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilterType": { + "type": "enum", + "members": { + "GROUNDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GROUNDING" + } + }, + "RELEVANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RELEVANCE" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilter" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyAction": { + "type": "enum", + "members": { + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyAssessment": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingFilters", + "traits": { + "smithy.api#documentation": "

The filter details for the guardrails contextual grounding filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The policy assessment details for the guardrails contextual grounding filter.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyUnitsProcessed": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#GuardrailConverseContentBlock": { "type": "union", "members": { @@ -1688,7 +1995,36 @@ } }, "traits": { - "smithy.api#documentation": "

\n

A content block for selective guarding with the Converse API (Converse and ConverseStream).\n

" + "smithy.api#documentation": "

\n

A content block for selective guarding with the Converse or ConverseStream API operations.\n

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailConverseContentQualifier": { + "type": "enum", + "members": { + "GROUNDING_SOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "grounding_source" + } + }, + "QUERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "query" + } + }, + "GUARD_CONTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "guard_content" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailConverseContentQualifierList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailConverseContentQualifier" } }, "com.amazonaws.bedrockruntime#GuardrailConverseTextBlock": { @@ -1700,6 +2036,12 @@ "smithy.api#documentation": "

The text that you want to guard.

", "smithy.api#required": {} } + }, + "qualifiers": { + "target": "com.amazonaws.bedrockruntime#GuardrailConverseContentQualifierList", + "traits": { + "smithy.api#documentation": "

The qualifier details for the guardrails contextual grounding filter.
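A sketch of how those qualifiers might be attached when selectively guarding Converse content (union case and enum names assumed from Soto's code-generation conventions):

    import SotoBedrockRuntime

    func guardedContent(passage: String, question: String) -> [BedrockRuntime.ContentBlock] {
        [
            .guardContent(.text(.init(qualifiers: [.groundingSource], text: passage))),  // grounding source
            .guardContent(.text(.init(qualifiers: [.query], text: question)))            // user query
        ]
    }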

" + } } }, "traits": { @@ -1789,6 +2131,26 @@ } } }, + "com.amazonaws.bedrockruntime#GuardrailOutputContent": { + "type": "structure", + "members": { + "text": { + "target": "com.amazonaws.bedrockruntime#GuardrailOutputText", + "traits": { + "smithy.api#documentation": "

The specific text for the output content produced by the guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The output content produced by the guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailOutputContentList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailOutputContent" + } + }, "com.amazonaws.bedrockruntime#GuardrailOutputText": { "type": "string" }, @@ -2057,6 +2419,14 @@ "target": "com.amazonaws.bedrockruntime#GuardrailRegexFilter" } }, + "com.amazonaws.bedrockruntime#GuardrailResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockruntime#ApplyGuardrail" + } + ] + }, "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAction": { "type": "enum", "members": { @@ -2096,6 +2466,12 @@ "smithy.api#documentation": "

The assessment for a Personally Identifiable Information (PII) policy.

" } }, + "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyFreeUnitsProcessed": { + "type": "integer" + }, + "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyUnitsProcessed": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#GuardrailStreamConfiguration": { "type": "structure", "members": { @@ -2149,6 +2525,27 @@ } } }, + "com.amazonaws.bedrockruntime#GuardrailTextBlock": { + "type": "structure", + "members": { + "text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The input text details to be evaluated by the guardrail.

", + "smithy.api#required": {} + } + }, + "qualifiers": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentQualifierList", + "traits": { + "smithy.api#documentation": "

The qualifiers describing the text block.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The text block to be evaluated by the guardrail.

" + } + }, "com.amazonaws.bedrockruntime#GuardrailTopic": { "type": "structure", "members": { @@ -2210,6 +2607,9 @@ "smithy.api#documentation": "

A behavior assessment of a topic policy.

" } }, + "com.amazonaws.bedrockruntime#GuardrailTopicPolicyUnitsProcessed": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#GuardrailTopicType": { "type": "enum", "members": { @@ -2264,6 +2664,56 @@ "smithy.api#documentation": "

A Top level guardrail trace object. For more information, see ConverseTrace.

" } }, + "com.amazonaws.bedrockruntime#GuardrailUsage": { + "type": "structure", + "members": { + "topicPolicyUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopicPolicyUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The topic policy units processed by the guardrail.

", + "smithy.api#required": {} + } + }, + "contentPolicyUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentPolicyUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The content policy units processed by the guardrail.

", + "smithy.api#required": {} + } + }, + "wordPolicyUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailWordPolicyUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The word policy units processed by the guardrail.

", + "smithy.api#required": {} + } + }, + "sensitiveInformationPolicyUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The sensitive information policy units processed by the guardrail.

", + "smithy.api#required": {} + } + }, + "sensitiveInformationPolicyFreeUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyFreeUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The sensitive information policy free units processed by the guardrail.

", + "smithy.api#required": {} + } + }, + "contextualGroundingPolicyUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailContextualGroundingPolicyUnitsProcessed", + "traits": { + "smithy.api#documentation": "

The contextual grounding policy units processed by the guardrail.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details on the use of the guardrail.

" + } + }, "com.amazonaws.bedrockruntime#GuardrailVersion": { "type": "string", "traits": { @@ -2303,6 +2753,9 @@ "smithy.api#documentation": "

The word policy assessment.

" } }, + "com.amazonaws.bedrockruntime#GuardrailWordPolicyUnitsProcessed": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#ImageBlock": { "type": "structure", "members": { @@ -2360,7 +2813,7 @@ "bytes": { "target": "smithy.api#Blob", "traits": { - "smithy.api#documentation": "

The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes.

", + "smithy.api#documentation": "

The raw image bytes for the image. If you use an AWS SDK, you don't need to encode the image bytes in base64.

", "smithy.api#length": { "min": 1 } @@ -2731,7 +3184,7 @@ "content": { "target": "com.amazonaws.bedrockruntime#ContentBlocks", "traits": { - "smithy.api#documentation": "

The message content.

", + "smithy.api#documentation": "

The message content. Note the following restrictions:

  • You can include up to 20 images. Each image's size, height, and width must be no more than 3.75 MB, 8000 px, and 8000 px, respectively.
  • You can include up to five documents. Each document's size must be no more than 4.5 MB.
  • If you include a ContentBlock with a document field in the array, you must also include a ContentBlock with a text field.
  • You can only include images and documents if the role is user.

A sketch of a user turn that respects these restrictions follows this list.
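As referenced above, a minimal user turn under these restrictions might look like this (generated names assumed from the model; the document block is of the kind shown earlier):

    import SotoBedrockRuntime

    func makeUserTurn(question: String, report: BedrockRuntime.DocumentBlock) -> BedrockRuntime.Message {
        BedrockRuntime.Message(
            content: [
                .text(question),       // a text block is required when a document is included
                .document(report)      // up to five documents, 4.5 MB each
            ],
            role: .user                // images and documents are only valid for the user role
        )
    }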
", "smithy.api#required": {} } } @@ -3067,7 +3520,7 @@ "guardContent": { "target": "com.amazonaws.bedrockruntime#GuardrailConverseContentBlock", "traits": { - "smithy.api#documentation": "

A content block to assess with the guardrail. Use with the Converse API (Converse and ConverseStream).

\n

For more information, see Use a guardrail with the Converse\n API in the Amazon Bedrock User Guide.

" + "smithy.api#documentation": "

A content block to assess with the guardrail. Use with the Converse or ConverseStream API operations.

\n

For more information, see Use a guardrail with the Converse\n API in the Amazon Bedrock User Guide.

" } } }, @@ -3143,7 +3596,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about a tool that you can use with the Converse API.

" + "smithy.api#documentation": "

Information about a tool that you can use with the Converse API. For more information, see Tool use (function calling) in the Amazon Bedrock User Guide.

" } }, "com.amazonaws.bedrockruntime#ToolChoice": { @@ -3193,7 +3646,7 @@ } }, "traits": { - "smithy.api#documentation": "

Configuration information for the tools that you pass to a model.

\n \n

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

\n
" + "smithy.api#documentation": "

Configuration information for the tools that you pass to a model. For more information, see Tool use (function calling) in the Amazon Bedrock User Guide.

\n \n

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

\n
" } }, "com.amazonaws.bedrockruntime#ToolInputSchema": { diff --git a/models/bedrock.json b/models/bedrock.json index ab3ccfd847..77d9d57b78 100644 --- a/models/bedrock.json +++ b/models/bedrock.json @@ -990,7 +990,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a guardrail to block topics and to filter out harmful content.

\n
    \n
  • \n

    Specify a name and optional description.

    \n
  • \n
  • \n

    Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

    \n
  • \n
  • \n

    Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    \n
      \n
    • \n

      Give a name and description so that the guardrail can properly identify the topic.

      \n
    • \n
    • \n

      Specify DENY in the type field.

      \n
    • \n
    • \n

      (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

      \n
    • \n
    \n
  • \n
  • \n

    Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    \n
      \n
    • \n

      Specify the category in the type field.

      \n
    • \n
    • \n

      Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

      \n
    • \n
    \n
  • \n
  • \n

    (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.

    \n
  • \n
  • \n

    (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources.

    \n
  • \n
", + "smithy.api#documentation": "

Creates a guardrail to block topics and to implement safeguards for your generative AI applications.

You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter out denied topics and words, and remove sensitive information for privacy protection.

  • Content filters - Adjust filter strengths to block input prompts or model responses containing harmful content.
  • Denied topics - Define a set of topics that are undesirable in the context of your application. These topics will be blocked if detected in user queries or model responses.
  • Word filters - Configure filters to block undesirable words, phrases, and profanity. Such words can include offensive terms, competitor names, etc.
  • Sensitive information filters - Block or mask sensitive information such as personally identifiable information (PII) or custom regex in user inputs and model responses.

In addition to the above policies, you can also configure the messages to be returned to the user if a user input or model response is in violation of the policies defined in the guardrail.

For more information, see Guardrails for Amazon Bedrock in the Amazon Bedrock User Guide.
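A sketch of a minimal request against the regenerated SotoBedrock client (member and case names are read off this model and Soto's conventions, not verified against the generated source; strengths, names, and messages are illustrative):

    import SotoBedrock

    func createBasicGuardrail(_ bedrock: Bedrock) async throws -> String {
        let request = Bedrock.CreateGuardrailRequest(
            blockedInputMessaging: "Sorry, I can't help with that request.",
            blockedOutputsMessaging: "Sorry, I can't share that response.",
            contentPolicyConfig: .init(filtersConfig: [
                .init(inputStrength: .high, outputStrength: .medium, type: .hate)
            ]),
            description: "Baseline safety guardrail",
            name: "baseline-guardrail"
        )
        let response = try await bedrock.createGuardrail(request)
        return response.guardrailArn      // response.version is always DRAFT
    }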

", "smithy.api#http": { "code": 202, "method": "POST", @@ -1038,6 +1038,12 @@ "smithy.api#documentation": "

The sensitive information policy to configure for the guardrail.

" } }, + "contextualGroundingPolicyConfig": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingPolicyConfig", + "traits": { + "smithy.api#documentation": "

The contextual grounding policy configuration used to create a guardrail.

" + } + }, "blockedInputMessaging": { "target": "com.amazonaws.bedrock#GuardrailBlockedMessaging", "traits": { @@ -1089,14 +1095,14 @@ "guardrailArn": { "target": "com.amazonaws.bedrock#GuardrailArn", "traits": { - "smithy.api#documentation": "

The ARN of the guardrail that was created.

", + "smithy.api#documentation": "

The ARN of the guardrail.

", "smithy.api#required": {} } }, "version": { "target": "com.amazonaws.bedrock#GuardrailDraftVersion", "traits": { - "smithy.api#documentation": "

The version of the guardrail that was created. This value should be 1.

", + "smithy.api#documentation": "

The version of the guardrail that was created. \n This value will always be DRAFT.

", "smithy.api#required": {} } }, @@ -1158,7 +1164,7 @@ "guardrailIdentifier": { "target": "com.amazonaws.bedrock#GuardrailIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail.

", + "smithy.api#documentation": "

The unique identifier of the guardrail. This can be an ID or the ARN.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1663,7 +1669,7 @@ "guardrailIdentifier": { "target": "com.amazonaws.bedrock#GuardrailIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail.

", + "smithy.api#documentation": "

The unique identifier of the guardrail. This can be an ID or the ARN.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2923,7 +2929,7 @@ "guardrailIdentifier": { "target": "com.amazonaws.bedrock#GuardrailIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail for which to get details.

", + "smithy.api#documentation": "

The unique identifier of the guardrail for which to get details. This can be an ID or the ARN.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2966,7 +2972,7 @@ "guardrailArn": { "target": "com.amazonaws.bedrock#GuardrailArn", "traits": { - "smithy.api#documentation": "

The ARN of the guardrail that was created.

", + "smithy.api#documentation": "

The ARN of the guardrail.

", "smithy.api#required": {} } }, @@ -3008,6 +3014,12 @@ "smithy.api#documentation": "

The sensitive information policy that was configured for the guardrail.

" } }, + "contextualGroundingPolicy": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingPolicy", + "traits": { + "smithy.api#documentation": "

The contextual grounding policy used in the guardrail.

" + } + }, "createdAt": { "target": "com.amazonaws.bedrock#Timestamp", "traits": { @@ -3525,7 +3537,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

\n
    \n
  • \n

    \n Hate – Describes language or a statement that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin).

    \n
  • \n
  • \n

    \n Insults – Describes language or a statement that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying.

    \n
  • \n
  • \n

    \n Sexual – Describes language or a statement that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex.

    \n
  • \n
  • \n

    \n Violence – Describes language or a statement that includes glorification of or threats to inflict physical pain, hurt, or injury toward a person, group or thing.

    \n
  • \n
\n

Content filtering depends on the confidence classification of user inputs and FM\n responses across each of the four harmful categories. All input and output statements are\n classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each\n harmful category. For example, if a statement is classified as\n Hate with HIGH confidence, the likelihood of the statement\n representing hateful content is high. A single statement can be classified across\n multiple categories with varying confidence levels. For example, a single statement\n can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

\n

For more information, see Guardrails content filters.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

  • Hate – Describes language or a statement that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin).
  • Insults – Describes language or a statement that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying.
  • Sexual – Describes language or a statement that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex.
  • Violence – Describes language or a statement that includes glorification of or threats to inflict physical pain, hurt, or injury toward a person, group or thing.

Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

For more information, see Guardrails content filters.

" } }, "com.amazonaws.bedrock#GuardrailContentFilterType": { @@ -3619,7 +3631,126 @@ } }, "traits": { - "smithy.api#documentation": "

Contains details about how to handle harmful content.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains details about how to handle harmful content.

" + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingFilter": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFilterType", + "traits": { + "smithy.api#documentation": "

The filter type details for the guardrails contextual grounding filter.

", + "smithy.api#required": {} + } + }, + "threshold": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The threshold details for the guardrails contextual grounding filter.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the guardrails contextual grounding filter.

" + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingFilterConfig": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFilterType", + "traits": { + "smithy.api#documentation": "

The filter details for the guardrails contextual grounding filter.

", + "smithy.api#required": {} + } + }, + "threshold": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The threshold details for the guardrails contextual grounding filter.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The filter configuration details for the guardrails contextual grounding filter.

" + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingFilterType": { + "type": "enum", + "members": { + "GROUNDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GROUNDING" + } + }, + "RELEVANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RELEVANCE" + } + } + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingFiltersConfig": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFilterConfig" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingPolicy": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFilters", + "traits": { + "smithy.api#documentation": "

The filter details for the guardrails contextual grounding policy.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the guardrails contextual grounding policy.

" + } + }, + "com.amazonaws.bedrock#GuardrailContextualGroundingPolicyConfig": { + "type": "structure", + "members": { + "filtersConfig": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingFiltersConfig", + "traits": { + "smithy.api#documentation": "

The filter configuration details for the guardrails contextual grounding policy.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The policy configuration details for the guardrails contextual grounding policy.
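For instance, a policy config that could be passed as the contextualGroundingPolicyConfig member of the CreateGuardrailRequest above might look like this (a sketch; thresholds are illustrative and shape names are assumed from this model):

    import SotoBedrock

    let groundingPolicy = Bedrock.GuardrailContextualGroundingPolicyConfig(
        filtersConfig: [
            .init(threshold: 0.75, type: .grounding),   // block responses not grounded in the source
            .init(threshold: 0.60, type: .relevance)    // block responses irrelevant to the query
        ]
    )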

" } }, "com.amazonaws.bedrock#GuardrailDescription": { @@ -3734,7 +3865,7 @@ } }, "traits": { - "smithy.api#documentation": "

The managed word list that was configured for the guardrail.\n (This is a list of words that are pre-defined and managed by Guardrails only.)

" + "smithy.api#documentation": "

The managed word list that was configured for the guardrail.\n (This is a list of words that are pre-defined and managed by guardrails only.)

" } }, "com.amazonaws.bedrock#GuardrailManagedWordsConfig": { @@ -3832,7 +3963,7 @@ "type": { "target": "com.amazonaws.bedrock#GuardrailPiiEntityType", "traits": { - "smithy.api#documentation": "

Configure guardrail type when the PII entity is detected.

", + "smithy.api#documentation": "

Configure guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

  • General
      • ADDRESS - A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.
      • AGE - An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," guardrails recognizes "40 years" as an age.
      • NAME - An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. Guardrails doesn't apply this entity type to names that are part of organizations or addresses. For example, guardrails recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address.
      • EMAIL - An email address, such as marymajor@email.com.
      • PHONE - A phone number. This entity type also includes fax and pager numbers.
      • USERNAME - A user name that identifies an account, such as a login name, screen name, nick name, or handle.
      • PASSWORD - An alphanumeric string that is used as a password, such as "*very20special#pass*".
      • DRIVER_ID - The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.
      • LICENSE_PLATE - A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.
      • VEHICLE_IDENTIFICATION_NUMBER - A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs.
  • Finance
      • CREDIT_DEBIT_CARD_CVV - A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.
      • CREDIT_DEBIT_CARD_EXPIRY - The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. Guardrails recognizes expiration dates such as 01/21, 01/2021, and Jan 2021.
      • CREDIT_DEBIT_CARD_NUMBER - The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.
      • PIN - A four-digit personal identification number (PIN) with which you can access your bank account.
      • INTERNATIONAL_BANK_ACCOUNT_NUMBER - An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure.
      • SWIFT_CODE - A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.
  • IT
      • IP_ADDRESS - An IPv4 address, such as 198.51.100.0.
      • MAC_ADDRESS - A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC).
      • URL - A web address, such as www.example.com.
      • AWS_ACCESS_KEY - A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.
      • AWS_SECRET_KEY - A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.
  • USA specific
      • US_BANK_ACCOUNT_NUMBER - A US bank account number, which is typically 10 to 12 digits long.
      • US_BANK_ROUTING_NUMBER - A US bank account routing number. These are typically nine digits long.
      • US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER - A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contains a "7" or "8" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits.
      • US_PASSPORT_NUMBER - A US passport number. Passport numbers range from six to nine alphanumeric characters.
      • US_SOCIAL_SECURITY_NUMBER - A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.
  • Canada specific
      • CA_HEALTH_NUMBER - A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.
      • CA_SOCIAL_INSURANCE_NUMBER - A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm.
  • UK Specific
      • UK_NATIONAL_HEALTH_SERVICE_NUMBER - A UK National Health Service Number is a 10-17 digit number, such as 485 777 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.
      • UK_NATIONAL_INSURANCE_NUMBER - A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits.
      • UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER - A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.
  • Custom
      • Regex filter - You can use regular expressions to define patterns for a guardrail to recognize and act upon, such as a serial number or booking ID.
", "smithy.api#required": {} } }, @@ -4410,7 +4541,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details about topics for the guardrail to identify and deny.

This data type is used in the following API operations:

" + "smithy.api#documentation": "

Details about topics for the guardrail to identify and deny.

" } }, "com.amazonaws.bedrock#GuardrailTopicDefinition": { @@ -4483,7 +4614,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains details about topics that the guardrail should identify and deny.

This data type is used in the following API operations:

" + "smithy.api#documentation": "

Contains details about topics that the guardrail should identify and deny.

" } }, "com.amazonaws.bedrock#GuardrailTopicType": { @@ -5225,7 +5356,7 @@ "guardrailIdentifier": { "target": "com.amazonaws.bedrock#GuardrailIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail.

", + "smithy.api#documentation": "

The unique identifier of the guardrail. This can be an ID or the ARN.

", "smithy.api#httpQuery": "guardrailIdentifier" } }, @@ -6791,7 +6922,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a guardrail with the values you specify.

  • Specify a name and optional description.

  • Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

  • Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    • Give a name and description so that the guardrail can properly identify the topic.

    • Specify DENY in the type field.

    • (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

  • Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    • Specify the category in the type field.

    • Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

  • (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.

  • (Optional) Attach any tags to the guardrail in the tags object. For more information, see Tag resources.
", + "smithy.api#documentation": "

Updates a guardrail with the values you specify.

  • Specify a name and optional description.

  • Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.

  • Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic.

    • Give a name and description so that the guardrail can properly identify the topic.

    • Specify DENY in the type field.

    • (Optional) Provide up to five prompts that you would categorize as belonging to the topic in the examples list.

  • Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category. For more information, see Content filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig.

    • Specify the category in the type field.

    • Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig.

  • (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.
", "smithy.api#http": { "code": 202, "method": "PUT", @@ -6806,7 +6937,7 @@ "guardrailIdentifier": { "target": "com.amazonaws.bedrock#GuardrailIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail

", + "smithy.api#documentation": "

The unique identifier of the guardrail. This can be an ID or the ARN.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6848,6 +6979,12 @@ "smithy.api#documentation": "

The sensitive information policy to configure for the guardrail.

" } }, + "contextualGroundingPolicyConfig": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingPolicyConfig", + "traits": { + "smithy.api#documentation": "

The contextual grounding policy configuration used to update a guardrail.

" + } + }, "blockedInputMessaging": { "target": "com.amazonaws.bedrock#GuardrailBlockedMessaging", "traits": { @@ -6886,7 +7023,7 @@ "guardrailArn": { "target": "com.amazonaws.bedrock#GuardrailArn", "traits": { - "smithy.api#documentation": "

The ARN of the guardrail that was created.

", + "smithy.api#documentation": "

The ARN of the guardrail.

", "smithy.api#required": {} } }, diff --git a/models/codebuild.json b/models/codebuild.json index 98f50908cd..5b1dcc7ce6 100644 --- a/models/codebuild.json +++ b/models/codebuild.json @@ -4916,7 +4916,20 @@ "inputToken": "nextToken", "outputToken": "nextToken", "items": "ids" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListBuildsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.codebuild#ListBuildsForProject": { diff --git a/models/codedeploy.json b/models/codedeploy.json index b1045a9e39..fd15084baf 100644 --- a/models/codedeploy.json +++ b/models/codedeploy.json @@ -7283,7 +7283,20 @@ "inputToken": "nextToken", "outputToken": "nextToken", "items": "applications" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListApplicationsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.codedeploy#ListApplicationsInput": { diff --git a/models/connect.json b/models/connect.json index 04b778e2b9..3e909398a2 100644 --- a/models/connect.json +++ b/models/connect.json @@ -576,6 +576,12 @@ "com.amazonaws.connect#AgentStatusId": { "type": "string" }, + "com.amazonaws.connect#AgentStatusList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#AgentStatus" + } + }, "com.amazonaws.connect#AgentStatusName": { "type": "string", "traits": { @@ -620,6 +626,52 @@ "smithy.api#documentation": "

Information about the agent's status.

" } }, + "com.amazonaws.connect#AgentStatusSearchConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#AgentStatusSearchCriteria" + } + }, + "com.amazonaws.connect#AgentStatusSearchCriteria": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#AgentStatusSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + } + }, + "AndConditions": { + "target": "com.amazonaws.connect#AgentStatusSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND condition.

The currently supported values for FieldName are name, description, state, type, displayOrder, and resourceID.
" + } + }, + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name, description, state, type, displayOrder, and resourceID.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return agent statuses.

" + } + }, + "com.amazonaws.connect#AgentStatusSearchFilter": { + "type": "structure", + "members": { + "AttributeFilter": { + "target": "com.amazonaws.connect#ControlPlaneAttributeFilter", + "traits": { + "smithy.api#documentation": "

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

  • The top level list specifies conditions that need to be applied with OR operator.

  • The inner list specifies conditions that need to be applied with AND operator.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, "com.amazonaws.connect#AgentStatusState": { "type": "enum", "members": { @@ -742,7 +794,7 @@ } }, "traits": { - "smithy.api#documentation": "

Can be used to define a list of preferred agents to target the contact within the queue.\n Note that agents must have the queue in their routing profile in order to be offered the\n contact.

" + "smithy.api#documentation": "

Can be used to define a list of preferred agents to target the contact within the queue.\n Note that agents must have the queue in their routing profile in order to be offered the\n contact.

" } }, "com.amazonaws.connect#AgentsMinOneMaxHundred": { @@ -1336,6 +1388,9 @@ { "target": "com.amazonaws.connect#ResumeContactRecording" }, + { + "target": "com.amazonaws.connect#SearchAgentStatuses" + }, { "target": "com.amazonaws.connect#SearchAvailablePhoneNumbers" }, @@ -1372,6 +1427,9 @@ { "target": "com.amazonaws.connect#SearchSecurityProfiles" }, + { + "target": "com.amazonaws.connect#SearchUserHierarchyGroups" + }, { "target": "com.amazonaws.connect#SearchUsers" }, @@ -1589,7 +1647,7 @@ "name": "connect" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "\n

Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.

There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

You can connect programmatically to an Amazon Web Services service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

", + "smithy.api#documentation": "\n

Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents.

There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

You can connect programmatically to an Amazon Web Services service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

", "smithy.api#title": "Amazon Connect Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -4013,13 +4071,13 @@ "AllowedIps": { "target": "com.amazonaws.connect#IpCidrList", "traits": { - "smithy.api#documentation": "

A list of IP address range strings that are allowed to access the Amazon Connect\n instance. For more information about how to configure IP addresses, see Configure\n IP address based access control in the Amazon Connect Administrator\n Guide.

" + "smithy.api#documentation": "

A list of IP address range strings that are allowed to access the Amazon Connect\n instance. For more information about how to configure IP addresses, see Configure IP\n address based access control in the Amazon Connect Administrator\n Guide.

" } }, "BlockedIps": { "target": "com.amazonaws.connect#IpCidrList", "traits": { - "smithy.api#documentation": "

A list of IP address range strings that are blocked from accessing the Amazon Connect\n instance. For more information about how to configure IP addresses, see Configure\n IP address based access control in the Amazon Connect Administrator\n Guide.

" + "smithy.api#documentation": "

A list of IP address range strings that are blocked from accessing the Amazon Connect\n instance. For more information about how to configure IP addresses, see Configure IP\n address based access control in the Amazon Connect Administrator\n Guide.

" } }, "IsDefault": { @@ -4061,7 +4119,7 @@ } }, "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change. To \nrequest access to this API, contact Amazon Web Services Support.

\n

Information about an authentication profile. An authentication profile is a resource that\n stores the authentication settings for users in your contact center. You use authentication\n profiles to set up IP address range restrictions and session timeouts. For more information, see \n Set IP address restrictions or session timeouts. \n

" + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change. To \nrequest access to this API, contact Amazon Web Services Support.

\n

Information about an authentication profile. An authentication profile is a resource that\n stores the authentication settings for users in your contact center. You use authentication\n profiles to set up IP address range restrictions and session timeouts. For more information, see\n Set IP\n address restrictions or session timeouts.

" } }, "com.amazonaws.connect#AuthenticationProfileDescription": { @@ -5053,6 +5111,26 @@ } } }, + "com.amazonaws.connect#CommonAttributeAndCondition": { + "type": "structure", + "members": { + "TagConditions": { + "target": "com.amazonaws.connect#TagAndConditionList", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a tag condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND\n condition.

" + } + }, + "com.amazonaws.connect#CommonAttributeOrConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#CommonAttributeAndCondition" + } + }, "com.amazonaws.connect#CommonNameLength127": { "type": "string", "traits": { @@ -5166,6 +5244,32 @@ } } }, + "com.amazonaws.connect#Condition": { + "type": "structure", + "members": { + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name and value.
" + } + }, + "NumberCondition": { + "target": "com.amazonaws.connect#NumberCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a numeric condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a ProficiencyName, ProficiencyValue and\n ProficiencyLimit.

" + } + }, + "com.amazonaws.connect#Conditions": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#Condition" + } + }, "com.amazonaws.connect#ConflictException": { "type": "structure", "members": { @@ -6361,6 +6465,29 @@ } } }, + "com.amazonaws.connect#ControlPlaneAttributeFilter": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#CommonAttributeOrConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + } + }, + "AndCondition": { + "target": "com.amazonaws.connect#CommonAttributeAndCondition", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND\n condition.

" + } + }, + "TagCondition": { + "target": "com.amazonaws.connect#TagCondition" + } + }, + "traits": { + "smithy.api#documentation": "

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

  • The top level list specifies conditions that need to be applied with OR operator.

  • The inner list specifies conditions that need to be applied with AND operator.
" + } + }, "com.amazonaws.connect#ControlPlaneTagFilter": { "type": "structure", "members": { @@ -16801,7 +16928,7 @@ "Metrics": { "target": "com.amazonaws.connect#MetricsV2", "traits": { - "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_REMOVED_FROM_QUEUE_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: This metric is not available in Amazon Connect admin website.

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
SUM_CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

    Valid values: API| Incoming | Outbound |\n Transfer | Callback | Queue_Transfer|\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
", + "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect

CONTACTS_ON_HOLD_AGENT_DISCONNECT
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold agent disconnect

CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts hold customer disconnect

CONTACTS_PUT_ON_HOLD
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts put on hold

CONTACTS_TRANSFERRED_OUT_EXTERNAL
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out external

CONTACTS_TRANSFERRED_OUT_INTERNAL
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contacts transferred out internal

CONTACTS_QUEUED
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts queued

CONTACTS_QUEUED_BY_ENQUEUE
Unit: Count
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
UI name: Contacts queued (enqueue timestamp)

CONTACTS_REMOVED_FROM_QUEUE_IN_X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
UI name: Contacts removed from queue in X seconds

CONTACTS_RESOLVED_IN_X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
UI name: Contacts resolved in X

CONTACTS_TRANSFERRED_OUT
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out
Feature is a valid filter but not a valid grouping.

CONTACTS_TRANSFERRED_OUT_BY_AGENT
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out by agent

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contacts transferred out queue

CURRENT_CASES
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Current cases

FLOWS_OUTCOME
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome

FLOWS_STARTED
Unit: Count
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows started

MAX_FLOW_TIME
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Maximum flow time

MAX_QUEUED_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Maximum queued time

MIN_FLOW_TIME
Unit: Seconds
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Minimum flow time

PERCENT_CASES_FIRST_CONTACT_RESOLVED
Unit: Percent
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved on first contact

PERCENT_CONTACTS_STEP_EXPIRED
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

PERCENT_CONTACTS_STEP_JOINED
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

PERCENT_FLOWS_OUTCOME
Unit: Percent
Valid metric filter key: FLOWS_OUTCOME_TYPE
Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp
UI name: Flows outcome percentage.
The FLOWS_OUTCOME_TYPE is not a valid grouping.

PERCENT_NON_TALK_TIME
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Non-talk time percent

PERCENT_TALK_TIME
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Talk time percent

PERCENT_TALK_TIME_AGENT
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Agent talk time percent

PERCENT_TALK_TIME_CUSTOMER
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Customer talk time percent

REOPENED_CASE_ACTIONS
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases reopened

RESOLVED_CASE_ACTIONS
Unit: Count
Required filter key: CASE_TEMPLATE_ARN
Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS
UI name: Cases resolved

SERVICE_LEVEL
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
UI name: Service level X

STEP_CONTACTS_QUEUED
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

SUM_AFTER_CONTACT_WORK_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: After contact work time

SUM_CONNECTING_TIME_AGENT
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent API connecting time
The Negate key in Metric Level Filters is not applicable for this metric.

SUM_CONTACTS_ABANDONED
Unit: Count
Metric filter: Valid values: API | Incoming | Outbound | Transfer | Callback | Queue_Transfer | Disconnect
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect
UI name: Contact abandoned

SUM_CONTACTS_ABANDONED_IN_X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
UI name: Contacts abandoned in X seconds

SUM_CONTACTS_ANSWERED_IN_X
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
UI name: Contacts answered in X seconds

SUM_CONTACT_FLOW_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact flow time

SUM_CONTACT_TIME_AGENT
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent on contact time

SUM_CONTACTS_DISCONNECTED
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Contact disconnected

SUM_ERROR_STATUS_TIME_AGENT
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Error status time

SUM_HANDLE_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Contact handle time

SUM_HOLD_TIME
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Customer hold time

SUM_IDLE_TIME_AGENT
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Agent idle time

SUM_INTERACTION_AND_HOLD_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect
UI name: Agent interaction and hold time

SUM_INTERACTION_TIME
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
UI name: Agent interaction time

SUM_NON_PRODUCTIVE_TIME_AGENT
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Non-Productive Time

SUM_ONLINE_TIME_AGENT
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
UI name: Online time

SUM_RETRY_CALLBACK_ATTEMPTS
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect
UI name: Callback attempts
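Several of the metrics above take a Threshold, which is supplied per metric when calling GetMetricDataV2. A minimal Soto sketch of a 60-second service level, assuming the generated Swift spellings of the existing MetricV2 and ThresholdV2 shapes (those shapes are not part of this diff, so the member names here are assumptions):

import SotoConnect

// Hedged sketch: SERVICE_LEVEL with ThresholdValue 60 and Comparison LT,
// i.e. the percentage of contacts answered in under 60 seconds.
let serviceLevel60 = Connect.MetricV2(
    name: "SERVICE_LEVEL",
    threshold: [Connect.ThresholdV2(comparison: "LT", thresholdValue: 60)]
)

The metric would then be passed in the metrics array of a GetMetricDataV2 request together with the resource ARN, time range, filters, and groupings.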
", "smithy.api#required": {} } }, @@ -19756,6 +19883,26 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#ListCondition": { + "type": "structure", + "members": { + "TargetListType": { + "target": "com.amazonaws.connect#TargetListType", + "traits": { + "smithy.api#documentation": "

The type of target list that will be used to filter the users.

" + } + }, + "Conditions": { + "target": "com.amazonaws.connect#Conditions", + "traits": { + "smithy.api#documentation": "

A list of Condition objects which would be applied together with an AND condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a List condition to search users with\n attributes included in Lists like Proficiencies.

" + } + }, "com.amazonaws.connect#ListContactEvaluations": { "type": "operation", "input": { @@ -24374,6 +24521,88 @@ } } }, + "com.amazonaws.connect#NullableProficiencyLimitValue": { + "type": "integer" + }, + "com.amazonaws.connect#NumberComparisonType": { + "type": "enum", + "members": { + "GREATER_OR_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_OR_EQUAL" + } + }, + "GREATER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER" + } + }, + "LESSER_OR_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESSER_OR_EQUAL" + } + }, + "LESSER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESSER" + } + }, + "EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUAL" + } + }, + "NOT_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUAL" + } + }, + "RANGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RANGE" + } + } + } + }, + "com.amazonaws.connect#NumberCondition": { + "type": "structure", + "members": { + "FieldName": { + "target": "com.amazonaws.connect#String", + "traits": { + "smithy.api#documentation": "

The name of the field in the number condition.

" + } + }, + "MinValue": { + "target": "com.amazonaws.connect#NullableProficiencyLimitValue", + "traits": { + "smithy.api#documentation": "

The minValue to be used while evaluating the number condition.

" + } + }, + "MaxValue": { + "target": "com.amazonaws.connect#NullableProficiencyLimitValue", + "traits": { + "smithy.api#documentation": "

The maxValue to be used while evaluating the number condition.

" + } + }, + "ComparisonType": { + "target": "com.amazonaws.connect#NumberComparisonType", + "traits": { + "smithy.api#documentation": "

The type of comparison to be made when evaluating the number condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a numeric condition.

\n \n

The currently supported value for FieldName is limit.

\n
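As a rough illustration of how the new number condition composes, the sketch below builds a proficiency limit condition in Soto. The Swift type, member, and enum case names are assumptions derived from the shapes added in this diff, not verified generated code:

import SotoConnect

// Assumed generated shape; matches a proficiency whose "limit" attribute is at least 3.
// A RANGE comparison would use minValue and maxValue together.
let limitAtLeastThree = Connect.NumberCondition(
    comparisonType: .greaterOrEqual,
    fieldName: "limit",   // the only FieldName the documentation lists as supported
    minValue: 3
)

Such a condition would sit inside the Conditions list of a ListCondition whose TargetListType is PROFICIENCIES when searching users.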
" + } + }, "com.amazonaws.connect#NumberReference": { "type": "structure", "members": { @@ -29951,6 +30180,111 @@ "smithy.api#pattern": "^s3://\\S+/.+|https://\\\\S+\\\\.s3\\\\.\\\\S+\\\\.amazonaws\\\\.com/\\\\S+$" } }, + "com.amazonaws.connect#SearchAgentStatuses": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#SearchAgentStatusesRequest" + }, + "output": { + "target": "com.amazonaws.connect#SearchAgentStatusesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches AgentStatuses in an Amazon Connect instance, with optional filtering.

", + "smithy.api#http": { + "method": "POST", + "uri": "/search-agent-statuses", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AgentStatuses", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#SearchAgentStatusesRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of\n the instance.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

" + } + }, + "SearchFilter": { + "target": "com.amazonaws.connect#AgentStatusSearchFilter", + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, + "SearchCriteria": { + "target": "com.amazonaws.connect#AgentStatusSearchCriteria", + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return agent statuses.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#SearchAgentStatusesResponse": { + "type": "structure", + "members": { + "AgentStatuses": { + "target": "com.amazonaws.connect#AgentStatusList", + "traits": { + "smithy.api#documentation": "

The agent statuses that match your search query.

" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results.

" + } + }, + "ApproximateTotalCount": { + "target": "com.amazonaws.connect#ApproximateTotalCount", + "traits": { + "smithy.api#documentation": "

The total number of agent statuses which matched your search query.
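Taken together, these shapes describe a standard paginated search call. A hedged usage sketch, assuming the Soto code generator emits the usual request/response types and an async searchAgentStatuses method for this operation (connect is an already configured Connect service client):

import SotoConnect

// Assumed generated API; iterates all pages manually via NextToken.
func printAgentStatusNames(connect: Connect, instanceId: String) async throws {
    var nextToken: String?
    repeat {
        let response = try await connect.searchAgentStatuses(
            Connect.SearchAgentStatusesRequest(
                instanceId: instanceId,
                maxResults: 50,
                nextToken: nextToken
            )
        )
        for status in response.agentStatuses ?? [] {
            print(status.name ?? "<unnamed>")
        }
        nextToken = response.nextToken
    } while nextToken != nil
}

Because the operation carries the smithy.api#paginated trait, Soto will normally also emit a paginator variant that wraps this loop.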

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#SearchAvailablePhoneNumbers": { "type": "operation", "input": { @@ -31083,7 +31417,7 @@ "ResourceTypes": { "target": "com.amazonaws.connect#ResourceTypeList", "traits": { - "smithy.api#documentation": "

The list of resource types to be used to search tags from. If not provided or if any empty\n list is provided, this API will search from all supported resource types.

" + "smithy.api#documentation": "

The list of resource types to be used to search tags from. If not provided or if an empty list is provided, this API will search from all supported resource types.

Supported resource types:
  - AGENT
  - ROUTING_PROFILE
  - STANDARD_QUEUE
  - SECURITY_PROFILE
  - OPERATING_HOURS
  - PROMPT
  - CONTACT_FLOW
  - FLOW_MODULE
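A hedged Soto sketch of restricting a tag search to a subset of the resource types listed above (the request member names are assumed from the existing SearchResourceTags shapes; the instance ID is a placeholder):

import SotoConnect

// Assumed generated shape; searches tags only on agents and routing profiles.
let tagSearch = Connect.SearchResourceTagsRequest(
    instanceId: "<instance-id>",
    resourceTypes: ["AGENT", "ROUTING_PROFILE"]
)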
" } }, "NextToken": { @@ -31361,6 +31695,111 @@ } } }, + "com.amazonaws.connect#SearchUserHierarchyGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#SearchUserHierarchyGroupsRequest" + }, + "output": { + "target": "com.amazonaws.connect#SearchUserHierarchyGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches UserHierarchyGroups in an Amazon Connect instance, with optional\n filtering.

\n \n

The UserHierarchyGroup with \"LevelId\": \"0\" is the foundation for building\n levels on top of an instance. It is not user-definable, nor is it visible in the UI.

\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/search-user-hierarchy-groups", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "UserHierarchyGroups", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#SearchUserHierarchyGroupsRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of\n the instance.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

" + } + }, + "SearchFilter": { + "target": "com.amazonaws.connect#UserHierarchyGroupSearchFilter", + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, + "SearchCriteria": { + "target": "com.amazonaws.connect#UserHierarchyGroupSearchCriteria", + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return UserHierarchyGroups.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#SearchUserHierarchyGroupsResponse": { + "type": "structure", + "members": { + "UserHierarchyGroups": { + "target": "com.amazonaws.connect#UserHierarchyGroupList", + "traits": { + "smithy.api#documentation": "

Information about the userHierarchyGroups.

" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results.

" + } + }, + "ApproximateTotalCount": { + "target": "com.amazonaws.connect#ApproximateTotalCount", + "traits": { + "smithy.api#documentation": "

The total number of userHierarchyGroups which matched your search query.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#SearchUsers": { "type": "operation", "input": { @@ -33701,7 +34140,7 @@ } }, "traits": { - "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

\n \n

The currently supported values for FieldName are name and\n description.

\n
" + "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

" } }, "com.amazonaws.connect#StringReference": { @@ -34058,7 +34497,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" + "smithy.api#pattern": "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$" } }, "com.amazonaws.connect#TagKeyList": { @@ -34257,6 +34696,17 @@ "target": "com.amazonaws.connect#TagSet" } }, + "com.amazonaws.connect#TargetListType": { + "type": "enum", + "members": { + "PROFICIENCIES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROFICIENCIES" + } + } + } + }, "com.amazonaws.connect#TaskActionDefinition": { "type": "structure", "members": { @@ -39442,6 +39892,58 @@ "target": "com.amazonaws.connect#UserData" } }, + "com.amazonaws.connect#UserHierarchyGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#HierarchyGroup" + } + }, + "com.amazonaws.connect#UserHierarchyGroupSearchConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#UserHierarchyGroupSearchCriteria" + } + }, + "com.amazonaws.connect#UserHierarchyGroupSearchCriteria": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#UserHierarchyGroupSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR condition.

" + } + }, + "AndConditions": { + "target": "com.amazonaws.connect#UserHierarchyGroupSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND condition.

" + } + }, + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

\n \n

The currently supported values for FieldName are name, parentId, levelId, and resourceID.

\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return userHierarchyGroup.

" + } + }, + "com.amazonaws.connect#UserHierarchyGroupSearchFilter": { + "type": "structure", + "members": { + "AttributeFilter": { + "target": "com.amazonaws.connect#ControlPlaneAttributeFilter", + "traits": { + "smithy.api#documentation": "

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR or AND (List of List) input where:
  - The top level list specifies conditions that need to be applied with OR operator.
  - The inner list specifies conditions that need to be applied with AND operator.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, "com.amazonaws.connect#UserId": { "type": "string" }, @@ -39690,6 +40192,12 @@ "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

\n

The currently supported values for FieldName are Username,\n FirstName, LastName, RoutingProfileId,\n SecurityProfileId, ResourceId.

" } }, + "ListCondition": { + "target": "com.amazonaws.connect#ListCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a List condition to search users with\n attributes included in Lists like Proficiencies.

" + } + }, "HierarchyGroupCondition": { "target": "com.amazonaws.connect#HierarchyGroupCondition", "traits": { diff --git a/models/database-migration-service.json b/models/database-migration-service.json index 36efe541dc..99fe7b4985 100644 --- a/models/database-migration-service.json +++ b/models/database-migration-service.json @@ -473,7 +473,7 @@ "sdkId": "Database Migration Service", "arnNamespace": "dms", "cloudFormationName": "DMS", - "cloudTrailEventSource": "databasemigrationservice.amazonaws.com", + "cloudTrailEventSource": "dms.amazonaws.com", "docId": "dms-2016-01-01", "endpointPrefix": "dms" }, @@ -6044,6 +6044,19 @@ "outputToken": "Marker", "pageSize": "MaxRecords" }, + "smithy.test#smokeTests": [ + { + "id": "DescribeEndpointsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ], "smithy.waiters#waitable": { "EndpointDeleted": { "documentation": "Wait until testing endpoint is deleted.", diff --git a/models/datazone.json b/models/datazone.json index 369f18219f..42d9639333 100644 --- a/models/datazone.json +++ b/models/datazone.json @@ -4932,6 +4932,9 @@ } }, "traits": { + "smithy.api#deprecated": { + "message": "This structure is deprecated." + }, "smithy.api#documentation": "

" } }, @@ -4941,6 +4944,9 @@ "target": "com.amazonaws.datazone#DataProductItem" }, "traits": { + "smithy.api#deprecated": { + "message": "This structure is deprecated." + }, "smithy.api#length": { "min": 0, "max": 100 @@ -5032,6 +5038,9 @@ } }, "traits": { + "smithy.api#deprecated": { + "message": "This structure is deprecated." + }, "smithy.api#documentation": "

" } }, @@ -18779,6 +18788,9 @@ "dataProductItem": { "target": "com.amazonaws.datazone#DataProductSummary", "traits": { + "smithy.api#deprecated": { + "message": "This field is deprecated." + }, "smithy.api#documentation": "

The data product item included in the search results.

" } } diff --git a/models/device-farm.json b/models/device-farm.json index 2aee36c4bd..00d2d5f7cd 100644 --- a/models/device-farm.json +++ b/models/device-farm.json @@ -2400,7 +2400,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2443,7 +2442,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2456,7 +2456,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2470,7 +2469,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2493,7 +2491,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2528,7 +2525,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2539,14 +2535,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2560,14 +2558,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -2576,11 +2572,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2591,14 +2587,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2612,7 +2610,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2632,7 +2629,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2643,14 +2639,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2661,9 +2659,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -5464,7 +5464,20 @@ "inputToken": "nextToken", "outputToken": "nextToken", "items": "devices" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListDevicesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.devicefarm#ListDevicesRequest": { diff --git a/models/dynamodb.json b/models/dynamodb.json index 0d8e4d1e42..3dcf98b62d 100644 --- a/models/dynamodb.json +++ b/models/dynamodb.json @@ -7720,7 +7720,22 @@ "outputToken": "LastEvaluatedTableName", "items": "TableNames", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListTablesSuccess", + "params": { + "Limit": 1 + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.dynamodb#ListTablesInput": { diff --git a/models/ec2.json b/models/ec2.json index 9bb20e4a89..20f2ebe277 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -2363,6 +2363,9 @@ { "target": "com.amazonaws.ec2#CreateIpam" }, + { + "target": "com.amazonaws.ec2#CreateIpamExternalResourceVerificationToken" + }, { "target": "com.amazonaws.ec2#CreateIpamPool" }, @@ -2591,6 +2594,9 @@ { "target": "com.amazonaws.ec2#DeleteIpam" }, + { + "target": "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationToken" + }, { "target": "com.amazonaws.ec2#DeleteIpamPool" }, @@ -2963,6 +2969,9 @@ { "target": "com.amazonaws.ec2#DescribeIpamByoasn" }, + { + "target": 
"com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokens" + }, { "target": "com.amazonaws.ec2#DescribeIpamPools" }, @@ -15897,6 +15906,70 @@ "smithy.api#documentation": "

Create an IPAM. Amazon VPC IP Address Manager (IPAM) is a VPC feature that you can use\n to automate your IP address management workflows including assigning, tracking,\n troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts\n throughout your Amazon Web Services Organization.

\n

For more information, see Create an IPAM in the Amazon VPC IPAM User Guide.\n

" } }, + "com.amazonaws.ec2#CreateIpamExternalResourceVerificationToken": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateIpamExternalResourceVerificationTokenRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateIpamExternalResourceVerificationTokenResult" + }, + "traits": { + "smithy.api#documentation": "

Create a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).\n
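A hedged Soto sketch of requesting such a token, assuming the EC2 method and shape names that Soto would generate from the structures added below (the IPAM ID is a placeholder):

import SotoEC2

// Assumed generated API; ec2 is a configured EC2 service client.
func createByoipVerificationToken(ec2: EC2, ipamId: String) async throws -> String? {
    let result = try await ec2.createIpamExternalResourceVerificationToken(
        EC2.CreateIpamExternalResourceVerificationTokenRequest(ipamId: ipamId)
    )
    return result.ipamExternalResourceVerificationToken?.tokenValue
}

The returned token value is what you publish (for example, in a DNS record) to prove ownership of the external resource.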

" + } + }, + "com.amazonaws.ec2#CreateIpamExternalResourceVerificationTokenRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + } + }, + "IpamId": { + "target": "com.amazonaws.ec2#IpamId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the IPAM that will create the token.

", + "smithy.api#required": {} + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

Token tags.

", + "smithy.api#xmlName": "TagSpecification" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateIpamExternalResourceVerificationTokenResult": { + "type": "structure", + "members": { + "IpamExternalResourceVerificationToken": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationToken", + "traits": { + "aws.protocols#ec2QueryName": "IpamExternalResourceVerificationToken", + "smithy.api#documentation": "

The verification token.

", + "smithy.api#xmlName": "ipamExternalResourceVerificationToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#CreateIpamPool": { "type": "operation", "input": { @@ -15929,7 +16002,7 @@ "Locale": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool.

\n

Possible values: Any Amazon Web Services Region, such as us-east-1.

" + "smithy.api#documentation": "

The locale for the pool should be one of the following:
  - An Amazon Web Services Region where you want this IPAM pool to be available for allocations.
  - The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you do not choose a locale, resources in Regions other than the IPAM's home region cannot use CIDRs from this pool.

Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone.
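For instance, a pool whose locale is a Local Zone network border group could be created as follows. This is a sketch under the assumption that the generated CreateIpamPoolRequest keeps the model's member names; the scope ID and border group value are placeholders:

import SotoEC2

// Assumed generated shape; "us-west-2-lax-1" stands in for a supported Local Zone border group.
let poolRequest = EC2.CreateIpamPoolRequest(
    addressFamily: .ipv4,
    ipamScopeId: "ipam-scope-0123456789abcdef0",
    locale: "us-west-2-lax-1"
)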

" } }, "SourceIpamPoolId": { @@ -17850,6 +17923,12 @@ "smithy.api#documentation": "

The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

", "smithy.api#xmlName": "TagSpecification" } + }, + "NetworkBorderGroup": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" + } } }, "traits": { @@ -23073,6 +23152,56 @@ "smithy.api#documentation": "

Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.

\n

For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide.\n

" } }, + "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationToken": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationTokenRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationTokenResult" + }, + "traits": { + "smithy.api#documentation": "

Delete a verification token. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).\n

" + } + }, + "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationTokenRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + } + }, + "IpamExternalResourceVerificationTokenId": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationTokenId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The token ID.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DeleteIpamExternalResourceVerificationTokenResult": { + "type": "structure", + "members": { + "IpamExternalResourceVerificationToken": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationToken", + "traits": { + "aws.protocols#ec2QueryName": "IpamExternalResourceVerificationToken", + "smithy.api#documentation": "

The verification token.

", + "smithy.api#xmlName": "ipamExternalResourceVerificationToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DeleteIpamPool": { "type": "operation", "input": { @@ -24425,6 +24554,12 @@ "smithy.api#documentation": "

The ID of the public IPv4 pool you want to delete.

", "smithy.api#required": {} } + }, + "NetworkBorderGroup": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" + } } }, "traits": { @@ -32635,6 +32770,82 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokens": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokensRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokensResult" + }, + "traits": { + "smithy.api#documentation": "

Describe verification tokens. A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).\n

" + } + }, + "com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokensRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters for the request. For more information about filtering, see Filtering CLI output.

Available filters:
  - ipam-arn
  - ipam-external-resource-verification-token-arn
  - ipam-external-resource-verification-token-id
  - ipam-id
  - ipam-region
  - state
  - status
  - token-name
  - token-value
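A hedged sketch of applying one of the filters above, assuming the generated request shape and the standard EC2.Filter structure:

import SotoEC2

// Assumed generated shape; returns only tokens whose status is still valid.
let describeTokens = EC2.DescribeIpamExternalResourceVerificationTokensRequest(
    filters: [EC2.Filter(name: "status", values: ["valid"])],
    maxResults: 10
)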
", + "smithy.api#xmlName": "Filter" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next page of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#IpamMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to return in one page of results.

" + } + }, + "IpamExternalResourceVerificationTokenIds": { + "target": "com.amazonaws.ec2#ValueStringList", + "traits": { + "smithy.api#documentation": "

Verification token IDs.

", + "smithy.api#xmlName": "IpamExternalResourceVerificationTokenId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DescribeIpamExternalResourceVerificationTokensResult": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.ec2#NextToken", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "smithy.api#xmlName": "nextToken" + } + }, + "IpamExternalResourceVerificationTokens": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationTokenSet", + "traits": { + "aws.protocols#ec2QueryName": "IpamExternalResourceVerificationTokenSet", + "smithy.api#documentation": "

Verification tokens.

", + "smithy.api#xmlName": "ipamExternalResourceVerificationTokenSet" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DescribeIpamPools": { "type": "operation", "input": { @@ -35548,7 +35759,7 @@ "target": "com.amazonaws.ec2#DescribePlacementGroupsResult" }, "traits": { - "smithy.api#documentation": "

Describes the specified placement groups or all of your placement groups. For more\n information, see Placement groups in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Describes the specified placement groups or all of your placement groups.

\n \n

To describe a specific placement group that is shared with\n your account, you must specify the ID of the placement group using the\n GroupId parameter. Specifying the name of a\n shared placement group using the GroupNames\n parameter will result in an error.

\n
\n

For more information, see Placement groups in the\n Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#DescribePlacementGroupsRequest": { @@ -35573,7 +35784,7 @@ "target": "com.amazonaws.ec2#PlacementGroupStringList", "traits": { "aws.protocols#ec2QueryName": "GroupName", - "smithy.api#documentation": "

The names of the placement groups.

\n

Default: Describes all your placement groups, or only those otherwise\n specified.

", + "smithy.api#documentation": "

The names of the placement groups.

Constraints:
  - You can specify a name only if the placement group is owned by your account.
  - If a placement group is shared with your account, specifying the name results in an error. You must use the GroupId parameter instead.
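Given the constraint above, a placement group that is shared with the account has to be addressed by ID rather than by name. A hedged sketch using the existing Soto EC2 request shape (the group ID is a placeholder):

import SotoEC2

// Use GroupIds, not GroupNames, for a placement group shared with this account.
let describeShared = EC2.DescribePlacementGroupsRequest(groupIds: ["pg-0123456789abcdef0"])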
", "smithy.api#xmlName": "groupName" } }, @@ -40870,7 +41081,7 @@ "target": "com.amazonaws.ec2#DescribeVolumesModificationsResult" }, "traits": { - "smithy.api#documentation": "

Describes the most recent volume modification request for the specified EBS volumes.

\n

If a volume has never been modified, some information in the output will be null.\n If a volume has been modified more than once, the output includes only the most \n recent modification request.

\n

For more information, see \n Monitor the progress of volume modifications in the Amazon EBS User Guide.

", + "smithy.api#documentation": "

Describes the most recent volume modification request for the specified EBS volumes.

\n

For more information, see \n Monitor the progress of volume modifications in the Amazon EBS User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -49934,7 +50145,7 @@ "target": "com.amazonaws.ec2#Double", "traits": { "aws.protocols#ec2QueryName": "WeightedCapacity", - "smithy.api#documentation": "

The number of units provided by the specified instance type.

\n \n

When specifying weights, the price used in the lowest-price and\n price-capacity-optimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
", + "smithy.api#documentation": "

The number of units provided by the specified instance type. These are the same units\n that you chose to set the target capacity in terms of instances, or a performance\n characteristic such as vCPUs, memory, or I/O.

\n

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the\n number of instances to the next whole number. If this value is not specified, the default\n is 1.

\n \n

When specifying weights, the price used in the lowest-price and\n price-capacity-optimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
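The rounding rule described above is easy to check with a small worked example (illustrative numbers only):

// Target capacity of 10 units with a weighted capacity of 3 units per instance:
// 10 / 3 = 3.33..., which Amazon EC2 rounds up, so 4 instances are launched.
let targetCapacity = 10.0
let weightedCapacity = 3.0
let instancesLaunched = Int((targetCapacity / weightedCapacity).rounded(.up)) // 4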
", "smithy.api#xmlName": "weightedCapacity" } }, @@ -50023,7 +50234,7 @@ "WeightedCapacity": { "target": "com.amazonaws.ec2#Double", "traits": { - "smithy.api#documentation": "

The number of units provided by the specified instance type.

\n \n

When specifying weights, the price used in the lowest-price and\n price-capacity-optimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
" + "smithy.api#documentation": "

The number of units provided by the specified instance type. These are the same units\n that you chose to set the target capacity in terms of instances, or a performance\n characteristic such as vCPUs, memory, or I/O.

\n

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the\n number of instances to the next whole number. If this value is not specified, the default\n is 1.

\n \n

When specifying weights, the price used in the lowest-price and\n price-capacity-optimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
" } }, "Priority": { @@ -51378,7 +51589,7 @@ "target": "com.amazonaws.ec2#GetConsoleOutputResult" }, "traits": { - "smithy.api#documentation": "

Gets the console output for the specified instance. For Linux instances, the instance\n console output displays the exact console output that would normally be displayed on a\n physical monitor attached to a computer. For Windows instances, the instance console\n output includes the last three system event log errors.

\n

By default, the console output returns buffered information that was posted shortly\n after an instance transition state (start, stop, reboot, or terminate). This information\n is available for at least one hour after the most recent post. Only the most recent 64\n KB of console output is available.

\n

You can optionally retrieve the latest serial console output at any time during the\n instance lifecycle. This option is supported on instance types that use the Nitro\n hypervisor.

\n

For more information, see Instance\n console output in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Gets the console output for the specified instance. For Linux instances, the instance\n console output displays the exact console output that would normally be displayed on a\n physical monitor attached to a computer. For Windows instances, the instance console\n output includes the last three system event log errors.

\n

For more information, see Instance\n console output in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To get the console output", @@ -67558,7 +67769,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NetworkBorderGroup", - "smithy.api#documentation": "

The network border group that the resource that the IP address is assigned to is in.

", + "smithy.api#documentation": "

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "networkBorderGroup" } }, @@ -67667,6 +67878,14 @@ "smithy.api#xmlName": "vpcId" } }, + "NetworkInterfaceAttachmentStatus": { + "target": "com.amazonaws.ec2#IpamNetworkInterfaceAttachmentStatus", + "traits": { + "aws.protocols#ec2QueryName": "NetworkInterfaceAttachmentStatus", + "smithy.api#documentation": "

For elastic network interfaces, this is the status of whether or not the elastic network interface is attached.

", + "smithy.api#xmlName": "networkInterfaceAttachmentStatus" + } + }, "SampleTime": { "target": "com.amazonaws.ec2#MillisecondDateTime", "traits": { @@ -67674,6 +67893,14 @@ "smithy.api#documentation": "

The last successful resource discovery time.

", "smithy.api#xmlName": "sampleTime" } + }, + "AvailabilityZoneId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZoneId", + "smithy.api#documentation": "

The Availability Zone ID.

", + "smithy.api#xmlName": "availabilityZoneId" + } } }, "traits": { @@ -67736,6 +67963,155 @@ "smithy.api#documentation": "

The discovery failure reason.

" } }, + "com.amazonaws.ec2#IpamExternalResourceVerificationToken": { + "type": "structure", + "members": { + "IpamExternalResourceVerificationTokenId": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationTokenId", + "traits": { + "aws.protocols#ec2QueryName": "IpamExternalResourceVerificationTokenId", + "smithy.api#documentation": "

The ID of the token.

", + "smithy.api#xmlName": "ipamExternalResourceVerificationTokenId" + } + }, + "IpamExternalResourceVerificationTokenArn": { + "target": "com.amazonaws.ec2#ResourceArn", + "traits": { + "aws.protocols#ec2QueryName": "IpamExternalResourceVerificationTokenArn", + "smithy.api#documentation": "

Token ARN.

", + "smithy.api#xmlName": "ipamExternalResourceVerificationTokenArn" + } + }, + "IpamId": { + "target": "com.amazonaws.ec2#IpamId", + "traits": { + "aws.protocols#ec2QueryName": "IpamId", + "smithy.api#documentation": "

The ID of the IPAM that created the token.

", + "smithy.api#xmlName": "ipamId" + } + }, + "IpamArn": { + "target": "com.amazonaws.ec2#ResourceArn", + "traits": { + "aws.protocols#ec2QueryName": "IpamArn", + "smithy.api#documentation": "

ARN of the IPAM that created the token.

", + "smithy.api#xmlName": "ipamArn" + } + }, + "IpamRegion": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "IpamRegion", + "smithy.api#documentation": "

Region of the IPAM that created the token.

", + "smithy.api#xmlName": "ipamRegion" + } + }, + "TokenValue": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "TokenValue", + "smithy.api#documentation": "

Token value.

", + "smithy.api#xmlName": "tokenValue" + } + }, + "TokenName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "TokenName", + "smithy.api#documentation": "

Token name.

", + "smithy.api#xmlName": "tokenName" + } + }, + "NotAfter": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "NotAfter", + "smithy.api#documentation": "

Token expiration.

", + "smithy.api#xmlName": "notAfter" + } + }, + "Status": { + "target": "com.amazonaws.ec2#TokenState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "

Token status.

", + "smithy.api#xmlName": "status" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Token tags.

", + "smithy.api#xmlName": "tagSet" + } + }, + "State": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationTokenState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

Token state.

", + "smithy.api#xmlName": "state" + } + } + }, + "traits": { + "smithy.api#documentation": "

A verification token is an Amazon Web Services-generated random value that you can use to prove ownership of an external resource. For example, you can use a verification token to validate that you control a public IP address range when you bring an IP address range to Amazon Web Services (BYOIP).\n

" + } + }, + "com.amazonaws.ec2#IpamExternalResourceVerificationTokenId": { + "type": "string" + }, + "com.amazonaws.ec2#IpamExternalResourceVerificationTokenSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationToken", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#IpamExternalResourceVerificationTokenState": { + "type": "enum", + "members": { + "CREATE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-in-progress" + } + }, + "CREATE_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-complete" + } + }, + "CREATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-failed" + } + }, + "DELETE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-in-progress" + } + }, + "DELETE_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-complete" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-failed" + } + } + } + }, "com.amazonaws.ec2#IpamId": { "type": "string" }, @@ -67780,6 +68156,23 @@ } } }, + "com.amazonaws.ec2#IpamNetworkInterfaceAttachmentStatus": { + "type": "enum", + "members": { + "available": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "available" + } + }, + "in_use": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "in-use" + } + } + } + }, "com.amazonaws.ec2#IpamOperatingRegion": { "type": "structure", "members": { @@ -67899,7 +68292,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Locale", - "smithy.api#documentation": "

The locale of the IPAM pool. In IPAM, the locale is the Amazon Web Services Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.

", + "smithy.api#documentation": "

The locale of the IPAM pool.

The locale for the pool should be one of the following:
  - An Amazon Web Services Region where you want this IPAM pool to be available for allocations.
  - The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you choose an Amazon Web Services Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.

", "smithy.api#xmlName": "locale" } }, @@ -68826,6 +69219,14 @@ "smithy.api#documentation": "

The ID of a VPC.

", "smithy.api#xmlName": "vpcId" } + }, + "AvailabilityZoneId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZoneId", + "smithy.api#documentation": "

The Availability Zone ID.

", + "smithy.api#xmlName": "availabilityZoneId" + } } }, "traits": { @@ -71749,7 +72150,7 @@ "target": "com.amazonaws.ec2#Double", "traits": { "aws.protocols#ec2QueryName": "WeightedCapacity", - "smithy.api#documentation": "

The number of units provided by the specified instance type.

\n \n

When specifying weights, the price used in the lowest-price and\n price-capacity-optimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
", + "smithy.api#documentation": "

The number of units provided by the specified instance type. These are the same units\n that you chose to set the target capacity in terms of instances, or a performance\n characteristic such as vCPUs, memory, or I/O.

\n

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the\n number of instances to the next whole number. If this value is not specified, the default\n is 1.

\n \n

When specifying weights, the price used in the lowestPrice and\n priceCapacityOptimized allocation strategies is per\n unit hour (where the instance price is divided by the specified\n weight). However, if all the specified weights are above the requested\n TargetCapacity, resulting in only 1 instance being launched, the price\n used is per instance hour.

\n
", "smithy.api#xmlName": "weightedCapacity" } }, @@ -85155,7 +85556,7 @@ "CidrAuthorizationContext": { "target": "com.amazonaws.ec2#IpamCidrAuthorizationContext", "traits": { - "smithy.api#documentation": "

A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option applies to public pools only.

" + "smithy.api#documentation": "

A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 and IPv6 pools in the public scope.

" } }, "NetmaskLength": { @@ -85170,6 +85571,18 @@ "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } + }, + "VerificationMethod": { + "target": "com.amazonaws.ec2#VerificationMethod", + "traits": { + "smithy.api#documentation": "

The method for verifying control of a public IP address range. Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools in the public scope.

" + } + }, + "IpamExternalResourceVerificationTokenId": { + "target": "com.amazonaws.ec2#IpamExternalResourceVerificationTokenId", + "traits": { + "smithy.api#documentation": "

Verification token ID. This option only applies to IPv4 and IPv6 pools in the public scope.
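Assuming this hunk extends EC2's ProvisionIpamPoolCidr request (the member names above match it), the new verification options might surface through Soto's generated EC2 module roughly as sketched below; the request type, initializer labels, and enum case names are assumptions based on Soto's usual code generation, and the IDs are placeholders:

    import SotoEC2

    let client = AWSClient(credentialProvider: .default)
    defer { try? client.syncShutdown() }
    let ec2 = EC2(client: client, region: .useast1)

    // Sketch only: provision a BYOIP CIDR into a public-scope pool using a
    // DNS token instead of the default remarks-x509 verification method.
    let request = EC2.ProvisionIpamPoolCidrRequest(
        cidr: "203.0.113.0/24",                                   // placeholder CIDR
        ipamExternalResourceVerificationTokenId: "ipam-ext-res-ver-token-0123456789abcdef0",
        ipamPoolId: "ipam-pool-0123456789abcdef0",
        verificationMethod: .dnsToken
    )
    // let response = try await ec2.provisionIpamPoolCidr(request)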

" + } } }, "traits": { @@ -85236,6 +85649,12 @@ "smithy.api#documentation": "

The netmask length of the CIDR you would like to allocate to the public IPv4 pool.

", "smithy.api#required": {} } + }, + "NetworkBorderGroup": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The Availability Zone (AZ) or Local Zone (LZ) network border group that the resource that the IP address is assigned to is in. Defaults to an AZ network border group. For more information on available Local Zones, see Local Zone availability in the Amazon EC2 User Guide.

" + } } }, "traits": { @@ -90695,6 +91114,12 @@ "traits": { "smithy.api#enumValue": "instance-connect-endpoint" } + }, + "ipam_external_resource_verification_token": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ipam-external-resource-verification-token" + } } } }, @@ -95813,7 +96238,7 @@ "target": "com.amazonaws.ec2#Double", "traits": { "aws.protocols#ec2QueryName": "WeightedCapacity", - "smithy.api#documentation": "

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

", + "smithy.api#documentation": "

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.

If the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.

When specifying weights, the price used in the lowestPrice and priceCapacityOptimized allocation strategies is per unit hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested TargetCapacity, resulting in only 1 instance being launched, the price used is per instance hour.
", "smithy.api#xmlName": "weightedCapacity" } }, @@ -99131,6 +99556,23 @@ } } }, + "com.amazonaws.ec2#TokenState": { + "type": "enum", + "members": { + "valid": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "valid" + } + }, + "expired": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "expired" + } + } + } + }, "com.amazonaws.ec2#TotalLocalStorageGB": { "type": "structure", "members": { @@ -104059,6 +104501,23 @@ } } }, + "com.amazonaws.ec2#VerificationMethod": { + "type": "enum", + "members": { + "remarks_x509": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "remarks-x509" + } + }, + "dns_token": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dns-token" + } + } + } + }, "com.amazonaws.ec2#VerifiedAccessEndpoint": { "type": "structure", "members": { @@ -105662,7 +106121,7 @@ "target": "com.amazonaws.ec2#VolumeModificationState", "traits": { "aws.protocols#ec2QueryName": "ModificationState", - "smithy.api#documentation": "

The current modification state. The modification state is null for unmodified\n volumes.

", + "smithy.api#documentation": "

The current modification state.

", "smithy.api#xmlName": "modificationState" } }, @@ -105780,7 +106239,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the modification status of an EBS volume.

If the volume has never been modified, some element values will be null.

" + "smithy.api#documentation": "

Describes the modification status of an EBS volume.

" } }, "com.amazonaws.ec2#VolumeModificationList": { diff --git a/models/elastic-beanstalk.json b/models/elastic-beanstalk.json index b742fd91e6..2d84b27963 100644 --- a/models/elastic-beanstalk.json +++ b/models/elastic-beanstalk.json @@ -238,7 +238,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -281,7 +280,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -294,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -308,7 +307,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -331,7 +329,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -366,7 +363,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -377,14 +373,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -398,14 +396,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -414,18 +410,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -434,7 +429,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -454,14 +450,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -475,7 +473,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -495,7 +492,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -506,14 +502,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -524,9 +522,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -5851,6 +5851,19 @@ ] } } + ], + "smithy.test#smokeTests": [ + { + "id": "ListAvailableSolutionStacksSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } ] } }, @@ -8355,6 +8368,54 @@ "traits": { "smithy.api#documentation": "

Updates the environment description, deploys a new application version, updates the configuration settings to an entirely new configuration template, or updates select configuration option values in the running environment.

Attempting to update both the release and configuration is not allowed and AWS Elastic Beanstalk returns an InvalidParameterCombination error.

When updating the configuration settings to a new template or individual settings, a draft configuration is created and DescribeConfigurationSettings for this environment returns two setting descriptions with different DeploymentStatus values.
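A hedged Swift sketch of issuing the same kind of update through Soto's generated ElasticBeanstalk client follows; the shape and member names are assumed from Soto's usual code generation and mirror the JSON example below (client setup omitted):

    import SotoElasticBeanstalk

    // Sketch only: update the "my-env" environment's load balancer health check
    // options, matching the option-settings example that follows.
    let message = ElasticBeanstalk.UpdateEnvironmentMessage(
        environmentName: "my-env",
        optionSettings: [
            .init(namespace: "aws:elb:healthcheck", optionName: "Interval", value: "15"),
            .init(namespace: "aws:elb:healthcheck", optionName: "Timeout", value: "8")
        ]
    )
    // let description = try await elasticBeanstalk.updateEnvironment(message)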

", "smithy.api#examples": [ + { + "title": "To configure option settings", + "documentation": "The following operation configures several options in the aws:elb:loadbalancer namespace:", + "input": { + "EnvironmentName": "my-env", + "OptionSettings": [ + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "Interval", + "Value": "15" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "Timeout", + "Value": "8" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "HealthyThreshold", + "Value": "2" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "UnhealthyThreshold", + "Value": "3" + } + ] + }, + "output": { + "ApplicationName": "my-app", + "EnvironmentName": "my-env", + "VersionLabel": "7f58-stage-150812_025409", + "Status": "Updating", + "EnvironmentId": "e-wtp2rpqsej", + "EndpointURL": "awseb-e-w-AWSEBLoa-14XB83101Q4L-104QXY80921.sa-east-1.elb.amazonaws.com", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "CNAME": "my-env.elasticbeanstalk.com", + "Health": "Grey", + "AbortableOperationInProgress": true, + "Tier": { + "Version": " ", + "Type": "Standard", + "Name": "WebServer" + }, + "DateUpdated": "2015-08-12T18:15:23.804Z", + "DateCreated": "2015-08-07T20:48:49.599Z" + } + }, { "title": "To update an environment to a new version", "documentation": "The following operation updates an environment named \"my-env\" to version \"v2\" of the application to which it belongs:", diff --git a/models/elasticsearch-service.json b/models/elasticsearch-service.json index fbac60339c..cccb5274d8 100644 --- a/models/elasticsearch-service.json +++ b/models/elasticsearch-service.json @@ -6560,7 +6560,20 @@ "method": "GET", "uri": "/2015-01-01/domain", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListDomainNamesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.elasticsearchservice#ListDomainNamesRequest": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index e271448f8c..de4927d664 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -453,16 +453,25 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -3018,6 +3027,12 @@ "ca-central-1" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "eu-central-1" : { @@ -3050,6 +3065,41 @@ "tags" : [ "dualstack" ] } ] }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "cloud9-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cloud9-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : 
"us-east-2" + }, + "deprecated" : true, + "hostname" : "cloud9-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cloud9-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cloud9-fips.us-west-2.amazonaws.com" + }, "il-central-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3068,58 +3118,189 @@ "us-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-east-2" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-west-1" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-west-2" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "hostname" : "cloud9-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "cloud9-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] } ] } } }, "cloudcontrolapi" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, 
"ca-west-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -3162,32 +3343,76 @@ "deprecated" : true, "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -3913,6 +4138,7 
@@ "cognito-identity" : { "endpoints" : { "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -7360,6 +7586,7 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-south-2" : { }, @@ -12938,6 +13165,7 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -13545,35 +13773,287 @@ }, "pi" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "af-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-central-1.api.aws", + "tags" : [ 
"dualstack" ] + } ] + }, + "eu-central-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-west-2.amazonaws.com" + }, + "il-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "protocols" : [ "https" ], + "variants" : [ { + 
"hostname" : "pi-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "pinpoint" : { @@ -13837,6 +14317,12 @@ }, "hostname" : "portal.sso.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "hostname" : "portal.sso.ca-west-1.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -19121,22 +19607,80 @@ "us-west-2" : { } } }, + "tax" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "tax.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, "textract" : { "endpoints" : { - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "textract.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "textract.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "textract.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "textract.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "textract-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "textract.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "textract.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "textract.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "textract.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -19176,24 +19720,48 @@ "variants" : [ { "hostname" : "textract-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "textract-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "textract-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "textract-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ 
-19547,7 +20115,19 @@ "deprecated" : true, "hostname" : "translate-fips.us-east-2.amazonaws.com" }, - "us-west-1" : { }, + "us-west-1" : { + "variants" : [ { + "hostname" : "translate-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-west-1.amazonaws.com" + }, "us-west-2" : { "variants" : [ { "hostname" : "translate-fips.us-west-2.amazonaws.com", @@ -21291,8 +21871,18 @@ }, "cloudcontrolapi" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "cloudformation" : { @@ -22021,8 +22611,20 @@ }, "pi" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "pipes" : { @@ -23270,12 +23872,24 @@ "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cloudcontrolapi-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "cloudcontrolapi.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -23858,17 +24472,31 @@ }, "directconnect" : { "endpoints" : { - "us-gov-east-1" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, - "hostname" : "directconnect.us-gov-east-1.amazonaws.com" + "deprecated" : true, + "hostname" : "directconnect-fips.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1" : { + "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "directconnect.us-gov-west-1.amazonaws.com" + "deprecated" : true, + "hostname" : "directconnect-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] } } }, @@ -25898,8 +26526,46 @@ }, "pi" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "pi-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-gov-east-1.api.aws", + "tags" : [ 
"dualstack", "fips" ] + }, { + "hostname" : "pi.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "pi-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "pi.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "pinpoint" : { @@ -27318,12 +27984,24 @@ "variants" : [ { "hostname" : "textract-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "textract-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "textract-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "textract.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -28208,31 +28886,17 @@ }, "redshift" : { "endpoints" : { - "fips-us-iso-east-1" : { + "us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "redshift.us-iso-east-1.c2s.ic.gov" }, - "fips-us-iso-west-1" : { + "us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-iso-west-1.c2s.ic.gov" } } }, @@ -28896,18 +29560,11 @@ }, "redshift" : { "endpoints" : { - "fips-us-isob-east-1" : { + "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-isob-east-1.sc2s.sgov.gov" } } }, diff --git a/models/firehose.json b/models/firehose.json index b31afc12c0..e89f2448b3 100644 --- a/models/firehose.json +++ b/models/firehose.json @@ -753,6 +753,20 @@ "smithy.api#documentation": "

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" } }, + "com.amazonaws.firehose#CatalogConfiguration": { + "type": "structure", + "members": { + "CatalogARN": { + "target": "com.amazonaws.firehose#GlueDataCatalogARN", + "traits": { + "smithy.api#documentation": "

Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.

Amazon Data Firehose is in preview release and is subject to change.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the containers where the destination Apache Iceberg Tables are persisted.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, "com.amazonaws.firehose#CloudWatchLoggingOptions": { "type": "structure", "members": { @@ -1036,6 +1050,12 @@ "traits": { "smithy.api#documentation": "

Configure Snowflake destination

" } + }, + "IcebergDestinationConfiguration": { + "target": "com.amazonaws.firehose#IcebergDestinationConfiguration", + "traits": { + "smithy.api#documentation": "

\n Configure Apache Iceberg Tables destination.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } } }, "traits": { @@ -1715,6 +1735,12 @@ "traits": { "smithy.api#documentation": "

The destination in the Serverless offering for Amazon OpenSearch Service.

" } + }, + "IcebergDestinationDescription": { + "target": "com.amazonaws.firehose#IcebergDestinationDescription", + "traits": { + "smithy.api#documentation": "

\n Describes a destination in Apache Iceberg Tables.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } } }, "traits": { @@ -1737,6 +1763,46 @@ "smithy.api#pattern": "^[a-zA-Z0-9-]+$" } }, + "com.amazonaws.firehose#DestinationTableConfiguration": { + "type": "structure", + "members": { + "DestinationTableName": { + "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace", + "traits": { + "smithy.api#documentation": "

\n Specifies the name of the Apache Iceberg Table.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

", + "smithy.api#required": {} + } + }, + "DestinationDatabaseName": { + "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace", + "traits": { + "smithy.api#documentation": "

\n The name of the Apache Iceberg database.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

", + "smithy.api#required": {} + } + }, + "UniqueKeys": { + "target": "com.amazonaws.firehose#ListOfNonEmptyStringsWithoutWhitespace", + "traits": { + "smithy.api#documentation": "

\n A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table. \n \n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "S3ErrorOutputPrefix": { + "target": "com.amazonaws.firehose#ErrorOutputPrefix", + "traits": { + "smithy.api#documentation": "

\n The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination. \n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the configuration of a destination in Apache Iceberg Tables.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "com.amazonaws.firehose#DestinationTableConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#DestinationTableConfiguration" + } + }, "com.amazonaws.firehose#DocumentIdOptions": { "type": "structure", "members": { @@ -3600,6 +3666,16 @@ } } }, + "com.amazonaws.firehose#GlueDataCatalogARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^arn:" + } + }, "com.amazonaws.firehose#HECAcknowledgmentTimeoutInSeconds": { "type": "integer", "traits": { @@ -4074,6 +4150,169 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.firehose#IcebergDestinationConfiguration": { + "type": "structure", + "members": { + "DestinationTableConfigurationList": { + "target": "com.amazonaws.firehose#DestinationTableConfigurationList", + "traits": { + "smithy.api#documentation": "

Provides a list of DestinationTableConfigurations which Firehose uses\n to deliver data to Apache Iceberg tables.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#BufferingHints" + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#IcebergS3BackupMode", + "traits": { + "smithy.api#documentation": "

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#RetryOptions" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the Apache Iceberg tables role.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

", + "smithy.api#required": {} + } + }, + "CatalogConfiguration": { + "target": "com.amazonaws.firehose#CatalogConfiguration", + "traits": { + "smithy.api#documentation": "

\n Configuration describing where the destination Apache Iceberg Tables are persisted.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

", + "smithy.api#required": {} + } + }, + "S3Configuration": { + "target": "com.amazonaws.firehose#S3DestinationConfiguration", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the destination configuration settings for Apache Iceberg Tables.

Amazon Data Firehose is in preview release and is subject to change.
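A hedged Swift sketch of how these new shapes might be assembled through Soto's generated Firehose module; the member names come from the model above, while the Swift initializer labels, enum case names, ARNs, and table names are assumptions or placeholders:

    import SotoFirehose

    // Sketch only: route records into an Iceberg table registered in the Glue
    // Data Catalog, backing up failed data to S3.
    let iceberg = Firehose.IcebergDestinationConfiguration(
        catalogConfiguration: .init(catalogARN: "arn:aws:glue:us-east-1:111122223333:catalog"),
        destinationTableConfigurationList: [
            .init(
                destinationDatabaseName: "sales_db",
                destinationTableName: "orders",
                s3ErrorOutputPrefix: "iceberg-errors/orders/",
                uniqueKeys: ["order_id"]
            )
        ],
        roleARN: "arn:aws:iam::111122223333:role/firehose-iceberg-role",
        s3BackupMode: .failedDataOnly,
        s3Configuration: .init(
            bucketARN: "arn:aws:s3:::firehose-iceberg-errors",
            roleARN: "arn:aws:iam::111122223333:role/firehose-iceberg-role"
        )
    )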

" + } + }, + "com.amazonaws.firehose#IcebergDestinationDescription": { + "type": "structure", + "members": { + "DestinationTableConfigurationList": { + "target": "com.amazonaws.firehose#DestinationTableConfigurationList", + "traits": { + "smithy.api#documentation": "

Provides a list of DestinationTableConfigurations which Firehose uses\n to deliver data to Apache Iceberg tables.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#BufferingHints" + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#IcebergS3BackupMode", + "traits": { + "smithy.api#documentation": "

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#RetryOptions" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "CatalogConfiguration": { + "target": "com.amazonaws.firehose#CatalogConfiguration", + "traits": { + "smithy.api#documentation": "

\n Configuration describing where the destination Iceberg tables are persisted.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "S3DestinationDescription": { + "target": "com.amazonaws.firehose#S3DestinationDescription" + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes a destination in Apache Iceberg Tables.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "com.amazonaws.firehose#IcebergDestinationUpdate": { + "type": "structure", + "members": { + "DestinationTableConfigurationList": { + "target": "com.amazonaws.firehose#DestinationTableConfigurationList", + "traits": { + "smithy.api#documentation": "

Provides a list of DestinationTableConfigurations which Firehose uses\n to deliver data to Apache Iceberg tables.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#BufferingHints" + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#IcebergS3BackupMode", + "traits": { + "smithy.api#documentation": "

Describes how Firehose will back up records. Currently, Firehose only supports FailedDataOnly for preview.

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#RetryOptions" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "CatalogConfiguration": { + "target": "com.amazonaws.firehose#CatalogConfiguration", + "traits": { + "smithy.api#documentation": "

\n Configuration describing where the destination Iceberg tables are persisted.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "S3Configuration": { + "target": "com.amazonaws.firehose#S3DestinationConfiguration" + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes an update for a destination in Apache Iceberg Tables.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } + }, + "com.amazonaws.firehose#IcebergS3BackupMode": { + "type": "enum", + "members": { + "FailedDataOnly": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FailedDataOnly" + } + }, + "AllData": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AllData" + } + } + } + }, "com.amazonaws.firehose#InputFormatConfiguration": { "type": "structure", "members": { @@ -4474,6 +4713,12 @@ "smithy.api#documentation": "

The authentication configuration of the Amazon MSK cluster.

", "smithy.api#required": {} } + }, + "ReadFromTimestamp": { + "target": "com.amazonaws.firehose#ReadFromTimestamp", + "traits": { + "smithy.api#documentation": "

The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to the timestamp when Firehose becomes Active.

If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestamp parameter to Epoch (1970-01-01T00:00:00Z).
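In Swift terms, the Earliest start position is just the Unix epoch passed as the timestamp value; a minimal sketch (the member label is assumed from Soto's usual lowerCamelCase generation of the model above):

    import Foundation

    // 1970-01-01T00:00:00Z, i.e. the "Epoch" value described above.
    let earliest = Date(timeIntervalSince1970: 0)
    print(ISO8601DateFormatter().string(from: earliest))   // 1970-01-01T00:00:00Z
    // e.g. Firehose.MSKSourceConfiguration(..., readFromTimestamp: earliest, ...)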

" + } } }, "traits": { @@ -4506,6 +4751,12 @@ "traits": { "smithy.api#documentation": "

Firehose starts retrieving records from the topic within the Amazon MSK\n cluster starting with this timestamp.

" } + }, + "ReadFromTimestamp": { + "target": "com.amazonaws.firehose#ReadFromTimestamp", + "traits": { + "smithy.api#documentation": "

The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to the timestamp when Firehose becomes Active.

If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestampUTC parameter to Epoch (1970-01-01T00:00:00Z).

" + } } }, "traits": { @@ -5243,6 +5494,9 @@ } } }, + "com.amazonaws.firehose#ReadFromTimestamp": { + "type": "timestamp" + }, "com.amazonaws.firehose#Record": { "type": "structure", "members": { @@ -5944,6 +6198,44 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.firehose#SnowflakeBufferingHints": { + "type": "structure", + "members": { + "SizeInMBs": { + "target": "com.amazonaws.firehose#SnowflakeBufferingSizeInMBs", + "traits": { + "smithy.api#documentation": "

\n Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1.\n

" + } + }, + "IntervalInSeconds": { + "target": "com.amazonaws.firehose#SnowflakeBufferingIntervalInSeconds", + "traits": { + "smithy.api#documentation": "

\n Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.\n
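A minimal Swift sketch of supplying these hints through Soto's generated Firehose module (initializer labels assumed; the shapes that follow bound the values to 1-128 MB and 0-900 seconds, with defaults of 1 MB and 0 seconds when omitted):

    import SotoFirehose

    // Sketch only: buffer up to 16 MB or 60 seconds, whichever is reached first,
    // before delivering to the Snowflake destination.
    let hints = Firehose.SnowflakeBufferingHints(
        intervalInSeconds: 60,
        sizeInMBs: 16
    )
    // Passed via the bufferingHints member of the Snowflake destination
    // configuration, description, and update shapes introduced in this change.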

" + } + }, + "com.amazonaws.firehose#SnowflakeBufferingIntervalInSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 900 + } + } + }, + "com.amazonaws.firehose#SnowflakeBufferingSizeInMBs": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 128 + } + } + }, "com.amazonaws.firehose#SnowflakeContentColumnName": { "type": "string", "traits": { @@ -6102,6 +6394,12 @@ "traits": { "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Snowflake.\n

" } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#SnowflakeBufferingHints", + "traits": { + "smithy.api#documentation": "

\n Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.\n

" + } } }, "traits": { @@ -6203,6 +6501,12 @@ "traits": { "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Snowflake.\n

" } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#SnowflakeBufferingHints", + "traits": { + "smithy.api#documentation": "

\n Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.\n

" + } } }, "traits": { @@ -6310,6 +6614,12 @@ "traits": { "smithy.api#documentation": "

\n Describes the Secrets Manager configuration in Snowflake.\n

" } + }, + "BufferingHints": { + "target": "com.amazonaws.firehose#SnowflakeBufferingHints", + "traits": { + "smithy.api#documentation": "

\n Describes the buffering to perform before delivering data to the Snowflake destination. \n

" + } } }, "traits": { @@ -7205,6 +7515,12 @@ "traits": { "smithy.api#documentation": "

Update to the Snowflake destination configuration settings.

" } + }, + "IcebergDestinationUpdate": { + "target": "com.amazonaws.firehose#IcebergDestinationUpdate", + "traits": { + "smithy.api#documentation": "

\n Describes an update for a destination in Apache Iceberg Tables.\n

\n

Amazon Data Firehose is in preview release and is subject to change.

" + } } }, "traits": { diff --git a/models/fsx.json b/models/fsx.json index 29023e09dd..76702f73e2 100644 --- a/models/fsx.json +++ b/models/fsx.json @@ -1286,7 +1286,7 @@ "ProgressPercent": { "target": "com.amazonaws.fsx#ProgressPercent", "traits": { - "smithy.api#documentation": "

The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. Does not apply to any\n other administrative action type.

" + "smithy.api#documentation": "

The percentage-complete status of a STORAGE_OPTIMIZATION \n or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any\n other administrative action type.

" } }, "RequestTime": { @@ -1298,7 +1298,7 @@ "Status": { "target": "com.amazonaws.fsx#Status", "traits": { - "smithy.api#documentation": "

The status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

  • UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.
" + "smithy.api#documentation": "

The status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

    For a backup restore to a second-generation FSx for ONTAP file system, indicates that all data has been downloaded to the volume, and clients now have read-write access to the volume.

  • UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.

  • PENDING - For a backup restore to a second-generation FSx for ONTAP file system, indicates that the file metadata is being downloaded onto the volume. The volume's Lifecycle state is CREATING.

  • IN_PROGRESS - For a backup restore to a second-generation FSx for ONTAP file system, indicates that all metadata has been downloaded to the new volume and clients can access data with read-only access while Amazon FSx downloads the file data to the volume. Track the progress of this process with the ProgressPercent element.
" } }, "TargetFileSystemValues": { @@ -1433,10 +1433,16 @@ "traits": { "smithy.api#enumValue": "VOLUME_INITIALIZE_WITH_SNAPSHOT" } + }, + "DOWNLOAD_DATA_FROM_BACKUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOWNLOAD_DATA_FROM_BACKUP" + } } }, "traits": { - "smithy.api#documentation": "

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • THROUGHPUT_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When THROUGHPUT_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION task starts.

      • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

      • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • IOPS_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When IOPS_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_TYPE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_TYPE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

  • VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when using the FULL_COPY strategy.

  • VOLUME_UPDATE_WITH_SNAPSHOT - A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume), or CLI (copy-snapshot-and-update-volume).
" + "smithy.api#documentation": "

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • THROUGHPUT_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When THROUGHPUT_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has completed successfully, a STORAGE_OPTIMIZATION task starts.

      • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

      • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • IOPS_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When IOPS_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_TYPE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_TYPE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

  • DOWNLOAD_DATA_FROM_BACKUP - An FSx for ONTAP backup is being restored to a new volume on a second-generation file system. Once all of the file metadata is loaded onto the volume, you can mount the volume with read-only access during this process.

  • VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when using the FULL_COPY strategy.

  • VOLUME_UPDATE_WITH_SNAPSHOT - A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume), or CLI (copy-snapshot-and-update-volume).
" } }, "com.amazonaws.fsx#AdministrativeActions": { @@ -1467,7 +1473,7 @@ "Aggregates": { "target": "com.amazonaws.fsx#Aggregates", "traits": { - "smithy.api#documentation": "

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The strings in the value of Aggregates are not formatted as aggrX, where X is a number between 1 and 6.

  • The value of Aggregates contains aggregates that are not present.

  • One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
" + "smithy.api#documentation": "

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The strings in the value of Aggregates are not formatted as aggrX, where X is a number between 1 and 12.

  • The value of Aggregates contains aggregates that are not present.

  • One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
" } }, "TotalConstituents": { @@ -3281,7 +3287,7 @@ "target": "com.amazonaws.fsx#OntapDeploymentType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies the FSx for ONTAP file system deployment type to use in creating\n the file system.

\n
    \n
  • \n

    \n MULTI_AZ_1 - (Default) A high availability file system configured\n for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ)\n unavailability.

    \n
  • \n
  • \n

    \n SINGLE_AZ_1 - A file system configured for Single-AZ\n redundancy.

    \n
  • \n
  • \n

    \n SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.

    \n
  • \n
\n

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to\n Choosing a file system deployment type.

", + "smithy.api#documentation": "

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

  • MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.

  • MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.

  • SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.

  • SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

", "smithy.api#required": {} } }, @@ -3306,7 +3312,7 @@ "PreferredSubnetId": { "target": "com.amazonaws.fsx#SubnetId", "traits": { - "smithy.api#documentation": "

Required when DeploymentType is set to MULTI_AZ_1. This\n specifies the subnet in which you want the preferred file server to be located.

" + "smithy.api#documentation": "

Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This\n specifies the subnet in which you want the preferred file server to be located.

" } }, "RouteTableIds": { @@ -3327,13 +3333,13 @@ "HAPairs": { "target": "com.amazonaws.fsx#HAPairs", "traits": { - "smithy.api#documentation": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. \n FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, \n Iops, and ThroughputCapacity. For more information, see \n High-availability (HA) pairs in the FSx for ONTAP user guide.

\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of HAPairs is less than 1 or greater than 12.

    \n
  • \n
  • \n

    The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second-generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  • The value of HAPairs is less than 1 or greater than 12.

  • The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.
" } }, "ThroughputCapacityPerHAPair": { "target": "com.amazonaws.fsx#ThroughputCapacityPerHAPair", "traits": { - "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

\n

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

\n

This field and ThroughputCapacity are the same for scale-up file systems powered by one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2 file systems, valid values are 3072 or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
" + "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

\n

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

\n

This field and ThroughputCapacity are the same for file systems powered by one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

    \n
  • \n
  • \n

    For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
" } } }, @@ -3366,7 +3372,7 @@ "target": "com.amazonaws.fsx#OpenZFSDeploymentType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies the file system deployment type. Single AZ deployment types are configured\n for redundancy within a single Availability Zone in an Amazon Web Services Region .\n Valid values are the following:

\n
    \n
  • \n

    \n MULTI_AZ_1- Creates file systems with high availability that are configured\n for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs).\n Multi_AZ_1 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon),\n Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

    \n
  • \n
  • \n

    \n SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s.\n Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx \n for OpenZFS is available.

    \n
  • \n
  • \n

    \n SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s\n using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia),\n US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland)\n Amazon Web Services Regions.

    \n
  • \n
\n

For more information, see Deployment type availability\n and File system performance\n in the Amazon FSx for OpenZFS User Guide.

", + "smithy.api#documentation": "

Specifies the file system deployment type. Valid values are the following:

  • MULTI_AZ_1 - Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same Amazon Web Services Region.

  • SINGLE_AZ_HA_2 - Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.

  • SINGLE_AZ_HA_1 - Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.

  • SINGLE_AZ_2 - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.

  • SINGLE_AZ_1 - Creates file systems with throughput capacities of 64 - 4,096 MB/s that automatically recover within a single Availability Zone.

For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.

", "smithy.api#required": {} } }, @@ -6737,7 +6743,7 @@ "FailureDetails": { "target": "com.amazonaws.fsx#FileCacheFailureDetails", "traits": { - "smithy.api#documentation": "

A structure providing details of any failures that occurred.

" + "smithy.api#documentation": "

A structure providing details of any failures that occurred in creating a cache.

" } }, "StorageCapacity": { @@ -8422,6 +8428,12 @@ "traits": { "smithy.api#enumValue": "SINGLE_AZ_2" } + }, + "MULTI_AZ_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MULTI_AZ_2" + } } } }, @@ -8449,7 +8461,7 @@ "DeploymentType": { "target": "com.amazonaws.fsx#OntapDeploymentType", "traits": { - "smithy.api#documentation": "

Specifies the FSx for ONTAP file system deployment type in use in the file\n system.

\n
    \n
  • \n

    \n MULTI_AZ_1 - (Default) A high availability file system configured\n for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ)\n unavailability.

    \n
  • \n
  • \n

    \n SINGLE_AZ_1 - A file system configured for Single-AZ\n redundancy.

    \n
  • \n
  • \n

    \n SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.

    \n
  • \n
\n

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to\n Choosing Multi-AZ or\n Single-AZ file system deployment.

" + "smithy.api#documentation": "

Specifies the FSx for ONTAP file system deployment type in use in the file\n system.

\n
    \n
  • \n

    \n MULTI_AZ_1 - A high availability file system configured\n for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ)\n unavailability. This is a first-generation FSx for ONTAP file system.

    \n
  • \n
  • \n

    \n MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate \n temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.

    \n
  • \n
  • \n

    \n SINGLE_AZ_1 - A file system configured for Single-AZ\n redundancy. This is a first-generation FSx for ONTAP file system.

    \n
  • \n
  • \n

    \n SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. \n This is a second-generation FSx for ONTAP file system.

    \n
  • \n
\n

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to\n Choosing Multi-AZ or\n Single-AZ file system deployment.

" } }, "EndpointIpAddressRange": { @@ -8494,13 +8506,13 @@ "HAPairs": { "target": "com.amazonaws.fsx#HAPairs", "traits": { - "smithy.api#documentation": "

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of HAPairs is less than 1 or greater than 12.

    \n
  • \n
  • \n

    The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, \n Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of HAPairs is less than 1 or greater than 12.

    \n
  • \n
  • \n

    The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.

    \n
  • \n
" } }, "ThroughputCapacityPerHAPair": { "target": "com.amazonaws.fsx#ThroughputCapacityPerHAPair", "traits": { - "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

\n

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

\n

This field and ThroughputCapacity are the same for file systems with one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2, valid values are 3072 or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
" + "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

\n

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

\n

This field and ThroughputCapacity are the same for file systems with one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

    \n
  • \n
  • \n

    For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
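Because these per-deployment-type values are easy to get wrong, a small client-side check that simply restates the documented table can fail fast before the API returns a 400. This helper is not part of the SDK; the values are copied from the documentation above.

```swift
// Mirrors the documented per-HA-pair throughput values (MBps) for each
// FSx for ONTAP deployment type; not part of the generated SDK.
func isValidThroughputPerHAPair(_ mbps: Int, deploymentType: String) -> Bool {
    switch deploymentType {
    case "SINGLE_AZ_1", "MULTI_AZ_1":
        return [128, 256, 512, 1024, 2048, 4096].contains(mbps)
    case "SINGLE_AZ_2":
        return [1536, 3072, 6144].contains(mbps)
    case "MULTI_AZ_2":
        return [384, 768, 1536, 3072, 6144].contains(mbps)
    default:
        return false
    }
}
```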
" } } }, @@ -8786,6 +8798,18 @@ "smithy.api#enumValue": "SINGLE_AZ_2" } }, + "SINGLE_AZ_HA_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SINGLE_AZ_HA_1" + } + }, + "SINGLE_AZ_HA_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SINGLE_AZ_HA_2" + } + }, "MULTI_AZ_1": { "target": "smithy.api#Unit", "traits": { @@ -8818,7 +8842,7 @@ "DeploymentType": { "target": "com.amazonaws.fsx#OpenZFSDeploymentType", "traits": { - "smithy.api#documentation": "

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports\u2028 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.

" + "smithy.api#documentation": "

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports\u2028 MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1.

" } }, "ThroughputCapacity": { @@ -10308,6 +10332,12 @@ "traits": { "smithy.api#enumValue": "UPDATED_OPTIMIZING" } + }, + "OPTIMIZING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPTIMIZING" + } } } }, @@ -11421,7 +11451,13 @@ "ThroughputCapacityPerHAPair": { "target": "com.amazonaws.fsx#ThroughputCapacityPerHAPair", "traits": { - "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

\n

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

\n

This field and ThroughputCapacity are the same for file systems with one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2, valid values are 3072 or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
" + "smithy.api#documentation": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

\n

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

\n

This field and ThroughputCapacity are the same for file systems with one HA pair.

\n
    \n
  • \n

    For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.

    \n
  • \n
  • \n

    For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.

    \n
  • \n
  • \n

    For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.

    \n
  • \n
\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.

    \n
  • \n
  • \n

    The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).

    \n
  • \n
  • \n

    The value of ThroughputCapacityPerHAPair is not a valid value.

    \n
  • \n
" + } + }, + "HAPairs": { + "target": "com.amazonaws.fsx#HAPairs", + "traits": { + "smithy.api#documentation": "

Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. \n If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity, \n Iops, and ThroughputCapacity. For more information, see \n High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support \n (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see \n Using block storage protocols.

" } } }, diff --git a/models/gamelift.json b/models/gamelift.json index bc5ff2bcb7..a68b2d2844 100644 --- a/models/gamelift.json +++ b/models/gamelift.json @@ -12112,7 +12112,20 @@ "outputToken": "NextToken", "items": "Builds", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListBuildsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.gamelift#ListBuildsInput": { diff --git a/models/global-accelerator.json b/models/global-accelerator.json index 7374c575f0..bf937e1e3f 100644 --- a/models/global-accelerator.json +++ b/models/global-accelerator.json @@ -807,6 +807,9 @@ "target": "com.amazonaws.globalaccelerator#CreateAcceleratorResponse" }, "errors": [ + { + "target": "com.amazonaws.globalaccelerator#AccessDeniedException" + }, { "target": "com.amazonaws.globalaccelerator#InternalServiceErrorException" }, @@ -815,6 +818,9 @@ }, { "target": "com.amazonaws.globalaccelerator#LimitExceededException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -986,6 +992,9 @@ }, { "target": "com.amazonaws.globalaccelerator#LimitExceededException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -1829,6 +1838,9 @@ }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -1917,6 +1929,9 @@ }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -5033,11 +5048,20 @@ { "target": "com.amazonaws.globalaccelerator#AcceleratorNotFoundException" }, + { + "target": "com.amazonaws.globalaccelerator#AttachmentNotFoundException" + }, + { + "target": "com.amazonaws.globalaccelerator#EndpointGroupNotFoundException" + }, { "target": "com.amazonaws.globalaccelerator#InternalServiceErrorException" }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#ListenerNotFoundException" } ], "traits": { @@ -5754,11 +5778,17 @@ { "target": "com.amazonaws.globalaccelerator#AccessDeniedException" }, + { + "target": "com.amazonaws.globalaccelerator#ConflictException" + }, { "target": "com.amazonaws.globalaccelerator#InternalServiceErrorException" }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -5785,6 +5815,9 @@ }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -5994,11 +6027,17 @@ { "target": "com.amazonaws.globalaccelerator#AcceleratorNotFoundException" }, + { + "target": "com.amazonaws.globalaccelerator#ConflictException" + }, { "target": "com.amazonaws.globalaccelerator#InternalServiceErrorException" }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { @@ -6025,6 +6064,9 @@ }, { "target": "com.amazonaws.globalaccelerator#InvalidArgumentException" + }, + { + "target": "com.amazonaws.globalaccelerator#TransactionInProgressException" } ], "traits": { diff --git 
a/models/glue.json b/models/glue.json index 5d3acdb195..1f7dff017e 100644 --- a/models/glue.json +++ b/models/glue.json @@ -6537,6 +6537,40 @@ "smithy.api#error": "client" } }, + "com.amazonaws.glue#ConditionExpression": { + "type": "structure", + "members": { + "Condition": { + "target": "com.amazonaws.glue#DatabrewCondition", + "traits": { + "smithy.api#documentation": "

The condition of the condition expression.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.glue#DatabrewConditionValue", + "traits": { + "smithy.api#documentation": "

The value of the condition expression.

" + } + }, + "TargetColumn": { + "target": "com.amazonaws.glue#TargetColumn", + "traits": { + "smithy.api#documentation": "

The target column of the condition expressions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Condition expression defined in the Glue Studio data preparation recipe node.

" + } + }, + "com.amazonaws.glue#ConditionExpressionList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ConditionExpression" + } + }, "com.amazonaws.glue#ConditionList": { "type": "list", "member": { @@ -11873,6 +11907,25 @@ "com.amazonaws.glue#DatabaseName": { "type": "string" }, + "com.amazonaws.glue#DatabrewCondition": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Z\\_]+$" + } + }, + "com.amazonaws.glue#DatabrewConditionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, "com.amazonaws.glue#Datatype": { "type": "structure", "members": { @@ -14385,13 +14438,13 @@ "com.amazonaws.glue#EnclosedInStringProperty": { "type": "string", "traits": { - "smithy.api#pattern": "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*$" + "smithy.api#pattern": "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$" } }, "com.amazonaws.glue#EnclosedInStringPropertyWithQuote": { "type": "string", "traits": { - "smithy.api#pattern": "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*$" + "smithy.api#pattern": "^([\\u0009\\u000B\\u000C\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF])*$" } }, "com.amazonaws.glue#EncryptionAtRest": { @@ -26461,7 +26514,7 @@ "com.amazonaws.glue#NodeName": { "type": "string", "traits": { - "smithy.api#pattern": "^([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*$" + "smithy.api#pattern": "^([^\\r\\n])*$" } }, "com.amazonaws.glue#NodeType": { @@ -26756,6 +26809,16 @@ "smithy.api#documentation": "

A structure representing an open format table.

" } }, + "com.amazonaws.glue#Operation": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Z\\_]+$" + } + }, "com.amazonaws.glue#OperationNotSupportedException": { "type": "structure", "members": { @@ -27164,6 +27227,34 @@ } } }, + "com.amazonaws.glue#ParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#ParameterName" + }, + "value": { + "target": "com.amazonaws.glue#ParameterValue" + } + }, + "com.amazonaws.glue#ParameterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[A-Za-z0-9]+$" + } + }, + "com.amazonaws.glue#ParameterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32768 + } + } + }, "com.amazonaws.glue#ParametersMap": { "type": "map", "key": { @@ -28456,8 +28547,13 @@ "RecipeReference": { "target": "com.amazonaws.glue#RecipeReference", "traits": { - "smithy.api#documentation": "

A reference to the DataBrew recipe used by the node.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A reference to the DataBrew recipe used by the node.

" + } + }, + "RecipeSteps": { + "target": "com.amazonaws.glue#RecipeSteps", + "traits": { + "smithy.api#documentation": "

Transform steps used in the recipe node.

" } } }, @@ -28465,6 +28561,27 @@ "smithy.api#documentation": "

A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.

" } }, + "com.amazonaws.glue#RecipeAction": { + "type": "structure", + "members": { + "Operation": { + "target": "com.amazonaws.glue#Operation", + "traits": { + "smithy.api#documentation": "

The operation of the recipe action.

", + "smithy.api#required": {} + } + }, + "Parameters": { + "target": "com.amazonaws.glue#ParameterMap", + "traits": { + "smithy.api#documentation": "

The parameters of the recipe action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Actions defined in the Glue Studio data preparation recipe node.

" + } + }, "com.amazonaws.glue#RecipeReference": { "type": "structure", "members": { @@ -28487,6 +28604,33 @@ "smithy.api#documentation": "

A reference to a Glue DataBrew recipe.

" } }, + "com.amazonaws.glue#RecipeStep": { + "type": "structure", + "members": { + "Action": { + "target": "com.amazonaws.glue#RecipeAction", + "traits": { + "smithy.api#documentation": "

The transformation action of the recipe step.

", + "smithy.api#required": {} + } + }, + "ConditionExpressions": { + "target": "com.amazonaws.glue#ConditionExpressionList", + "traits": { + "smithy.api#documentation": "

The condition expressions for the recipe step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A recipe step used in a Glue Studio data preparation recipe node.
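Once this model is regenerated, the new recipe shapes should surface as nested Swift structs. A hedged sketch of wiring them together follows; the operation name, parameter keys, column names, and node identifiers are illustrative assumptions, not values from this diff.

```swift
import SotoGlue

// Hedged sketch: a Glue Studio recipe node built from inline recipe steps.
// Operation, parameters, and columns are hypothetical DataBrew-style values.
let upperCaseStep = Glue.RecipeStep(
    action: Glue.RecipeAction(
        operation: "UPPER_CASE",
        parameters: ["sourceColumn": "customer_name"]
    ),
    conditionExpressions: [
        Glue.ConditionExpression(
            condition: "IS_NOT_NULL",
            targetColumn: "customer_name"
        )
    ]
)

let recipeNode = Glue.Recipe(
    inputs: ["upstream-node-id"],
    name: "PrepareCustomers",
    recipeSteps: [upperCaseStep]   // RecipeReference is now optional, so steps can be supplied inline
)
```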

" + } + }, + "com.amazonaws.glue#RecipeSteps": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#RecipeStep" + } + }, "com.amazonaws.glue#RecipeVersion": { "type": "string", "traits": { @@ -34341,6 +34485,15 @@ } } }, + "com.amazonaws.glue#TargetColumn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, "com.amazonaws.glue#TargetFormat": { "type": "enum", "members": { diff --git a/models/groundstation.json b/models/groundstation.json index 48e79bc409..fbdf8a4860 100644 --- a/models/groundstation.json +++ b/models/groundstation.json @@ -4979,7 +4979,7 @@ } }, "traits": { - "smithy.api#documentation": "

Ephemeris data in Orbit Ephemeris Message (OEM) format.

" + "smithy.api#documentation": "

\n Ephemeris data in Orbit Ephemeris Message (OEM) format.\n

\n

\n Position, velocity, and acceleration units must be represented in km, km/s, and\n km/s**2, respectively, in ephemeris data lines. Covariance matrix line units must be\n represented in km**2 if computed from two positions, km**2/s if computed from one\n position and one velocity, and km**2/s**2 if computed from two velocities. Consult section\n 7.7.2 of The Consultative Committee for Space Data Systems (CCSDS)\n Recommended Standard for Orbit Data Messages\n for more information.\n

" } }, "com.amazonaws.groundstation#PaginationMaxResults": { diff --git a/models/ivschat.json b/models/ivschat.json index 5ba8fb7d2c..38a532a2f3 100644 --- a/models/ivschat.json +++ b/models/ivschat.json @@ -103,7 +103,7 @@ "date" ] }, - "smithy.api#documentation": "

\n Introduction\n

\n

The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat\n resources. You also need to integrate with the Amazon IVS Chat Messaging\n API, to enable users to interact with chat rooms in real time.

\n

The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat\n HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the\n AWS General Reference.

\n

\n Notes on terminology:\n

\n
    \n
  • \n

    You create service applications using the Amazon IVS Chat API. We refer to these as\n applications.

    \n
  • \n
  • \n

    You create front-end client applications (browser and Android/iOS apps) using the\n Amazon IVS Chat Messaging API. We refer to these as clients.

    \n
  • \n
\n

\n Key Concepts\n

\n
    \n
  • \n

    \n LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room.

    \n
  • \n
  • \n

    \n Room — The central Amazon IVS Chat resource through which clients connect to and exchange chat messages.

    \n
  • \n
\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no service-specific\n constraints beyond what is documented there.

\n

Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags).

\n

The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Room.

\n

At most 50 tags can be applied to a resource.

\n

\n API Access Security\n

\n

Your Amazon IVS Chat applications (service applications and clients) must be authenticated\n and authorized to access Amazon IVS Chat resources. Note the differences between these\n concepts:

\n
    \n
  • \n

    \n Authentication is about verifying identity. Requests to the\n Amazon IVS Chat API must be signed to verify your identity.

    \n
  • \n
  • \n

    \n Authorization is about granting permissions. Your IAM roles need\n to have permissions for Amazon IVS Chat API requests.

    \n
  • \n
\n

Users (viewers) connect to a room using secure access tokens that you create using the\n CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for\n every user’s chat session, passing identity and authorization information about the\n user.

\n

\n Signing API Requests\n

\n

HTTP API requests must be signed with an AWS SigV4 signature using your AWS security\n credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the\n underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API\n directly, it’s your responsibility to sign the requests.

\n

You generate a signature using valid AWS credentials for an IAM role that has permission\n to perform the requested action. For example, DeleteMessage requests must be made using an IAM\n role that has the ivschat:DeleteMessage permission.

\n

For more information:

\n \n

\n Amazon Resource Names (ARNs)\n

\n

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API calls. For more\n information, see Amazon Resource Names in the AWS General\n Reference.

\n

\n Messaging Endpoints\n

\n
    \n
  • \n

    \n DeleteMessage — Sends an event to a specific room which\n directs clients to delete a specific message; that is, unrender it from view and delete it\n from the client’s chat history. This event’s EventName is\n aws:DELETE_MESSAGE. This replicates the \n DeleteMessage WebSocket operation in the Amazon IVS Chat Messaging API.

    \n
  • \n
  • \n

    \n DisconnectUser — Disconnects all connections using a specified\n user ID from a room. This replicates the \n DisconnectUser WebSocket operation in the Amazon IVS Chat Messaging API.

    \n
  • \n
  • \n

    \n SendEvent — Sends an event to a room. Use this within your\n application’s business logic to send events to clients of a room; e.g., to notify clients\n to change the way the chat UI is rendered.

    \n
  • \n
\n

\n Chat Token Endpoint\n

\n
    \n
  • \n

    \n CreateChatToken — Creates an encrypted token that is used by a chat participant to establish an\n individual WebSocket chat connection to a room. When the token is used to connect to chat,\n the connection is valid for the session duration specified in the request. The token\n becomes invalid at the token-expiration timestamp included in the response.

    \n
  • \n
\n

\n Room Endpoints\n

\n
    \n
  • \n

    \n CreateRoom — Creates a room that allows clients to connect and\n pass messages.

    \n
  • \n
  • \n

    \n DeleteRoom — Deletes the specified room.

    \n
  • \n
  • \n

    \n GetRoom — Gets the specified room.

    \n
  • \n
  • \n

    \n ListRooms — Gets summary information about all your rooms in\n the AWS region where the API request is processed.

    \n
  • \n
  • \n

    \n UpdateRoom — Updates a room’s configuration.

    \n
  • \n
\n

\n Logging Configuration Endpoints\n

\n \n

\n Tags Endpoints\n

\n
    \n
  • \n

    \n ListTagsForResource — Gets information about AWS tags for the\n specified ARN.

    \n
  • \n
  • \n

    \n TagResource — Adds or updates tags for the AWS resource with\n the specified ARN.

    \n
  • \n
  • \n

    \n UntagResource — Removes tags from the resource with the\n specified ARN.

    \n
  • \n
\n

All the above are HTTP operations. There is a separate messaging API\n for managing Chat resources; see the Amazon IVS Chat Messaging API\n Reference.

", + "smithy.api#documentation": "

\n Introduction\n

\n

The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat\n resources. You also need to integrate with the Amazon IVS Chat Messaging\n API, to enable users to interact with chat rooms in real time.

\n

The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat\n HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the\n AWS General Reference.

\n

This document describes HTTP operations. There is a separate messaging API\n for managing Chat resources; see the Amazon IVS Chat Messaging API\n Reference.

\n

\n Notes on terminology:\n

\n
    \n
  • \n

    You create service applications using the Amazon IVS Chat API. We refer to these as\n applications.

    \n
  • \n
  • \n

    You create front-end client applications (browser and Android/iOS apps) using the\n Amazon IVS Chat Messaging API. We refer to these as clients.

    \n
  • \n
\n

\n Resources\n

\n

The following resources are part of Amazon IVS Chat:

\n
    \n
  • \n

    \n LoggingConfiguration — A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information.

    \n
  • \n
  • \n

    \n Room — The central Amazon IVS Chat resource through\n which clients connect to and exchange chat messages. See the Room endpoints for more\n information.

    \n
  • \n
\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS Chat has no service-specific\n constraints beyond what is documented there.

\n

Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags).

\n

The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Room.

\n

At most 50 tags can be applied to a resource.

\n

\n API Access Security\n

\n

Your Amazon IVS Chat applications (service applications and clients) must be authenticated\n and authorized to access Amazon IVS Chat resources. Note the differences between these\n concepts:

\n
    \n
  • \n

    \n Authentication is about verifying identity. Requests to the\n Amazon IVS Chat API must be signed to verify your identity.

    \n
  • \n
  • \n

    \n Authorization is about granting permissions. Your IAM roles need\n to have permissions for Amazon IVS Chat API requests.

    \n
  • \n
\n

Users (viewers) connect to a room using secure access tokens that you create using the\n CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for\n every user’s chat session, passing identity and authorization information about the\n user.

\n

\n Signing API Requests\n

\n

HTTP API requests must be signed with an AWS SigV4 signature using your AWS security\n credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the\n underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API\n directly, it’s your responsibility to sign the requests.

\n

You generate a signature using valid AWS credentials for an IAM role that has permission\n to perform the requested action. For example, DeleteMessage requests must be made using an IAM\n role that has the ivschat:DeleteMessage permission.

\n

For more information:

\n \n

\n Amazon Resource Names (ARNs)\n

\n

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a\n resource unambiguously across all of AWS, such as in IAM policies and API calls. For more\n information, see Amazon Resource Names in the AWS General\n Reference.
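The chat-token flow described above can be sketched against the generated client. The client type name (Ivschat), module name, capability case, room ARN, and user ID below are assumptions and placeholders based on Soto's naming conventions, not values confirmed by this diff.

```swift
import SotoIvschat

// Hedged sketch: issue a chat token for one viewer session.
func issueChatToken(client: AWSClient) async throws {
    let chat = Ivschat(client: client, region: .uswest2)
    let token = try await chat.createChatToken(
        .init(
            capabilities: [.sendMessage],
            roomIdentifier: "arn:aws:ivschat:us-west-2:111122223333:room/ExampleRoom",
            sessionDurationInMinutes: 60,
            userId: "viewer-123"
        )
    )
    print(token.token ?? "no token returned")
}
```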

", "smithy.api#title": "Amazon Interactive Video Service Chat", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/license-manager-linux-subscriptions.json b/models/license-manager-linux-subscriptions.json index 30c6469b38..1fa1ca48ce 100644 --- a/models/license-manager-linux-subscriptions.json +++ b/models/license-manager-linux-subscriptions.json @@ -7,6 +7,60 @@ "com.amazonaws.licensemanagerlinuxsubscriptions#BoxLong": { "type": "long" }, + "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProviderRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ThrottlingException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Remove a third-party subscription provider from the Bring Your Own License (BYOL) subscriptions \n\t\t\tregistered to your account.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/subscription/DeregisterSubscriptionProvider" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProviderRequest": { + "type": "structure", + "members": { + "SubscriptionProviderArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the subscription provider resource to deregister.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProviderResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.licensemanagerlinuxsubscriptions#Filter": { "type": "structure", "members": { @@ -39,6 +93,97 @@ "target": "com.amazonaws.licensemanagerlinuxsubscriptions#Filter" } }, + "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProviderRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ThrottlingException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Get details for a Bring Your Own License (BYOL) subscription that's registered to your account.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/subscription/GetRegisteredSubscriptionProvider" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProviderRequest": { + "type": "structure", + "members": { + "SubscriptionProviderArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the BYOL registration resource to get details for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProviderResponse": { + "type": "structure", + "members": { + "SubscriptionProviderArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the BYOL registration resource specified in the request.

" + } + }, + "SubscriptionProviderSource": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource", + "traits": { + "smithy.api#documentation": "

The subscription provider for the BYOL registration resource specified \n\t\t\tin the request.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the third-party access secret stored in Secrets Manager for the BYOL \n\t\t\tregistration resource specified in the request.

" + } + }, + "SubscriptionProviderStatus": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderStatus", + "traits": { + "smithy.api#documentation": "

The status of the Linux subscription provider access token from the last \n\t\t\tsuccessful subscription data request.

" + } + }, + "SubscriptionProviderStatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The detailed message from your subscription provider token status.

" + } + }, + "LastSuccessfulDataRetrievalTime": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The timestamp from the last time License Manager retrieved subscription details \n\t\t\tfrom your registered third-party Linux subscription provider.
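The two new BYOL subscription-provider operations map onto the generated client in the usual way. A hedged sketch follows; the client and method names assume Soto's standard code generation for this model, and the ARN is a placeholder.

```swift
import SotoLicenseManagerLinuxSubscriptions

// Hedged sketch: inspect, then deregister, a registered subscription provider.
func inspectAndRemoveProvider(client: AWSClient) async throws {
    let linuxSubs = LicenseManagerLinuxSubscriptions(client: client, region: .useast1)
    let providerArn = "arn:aws:license-manager-linux-subscriptions:us-east-1:111122223333:subscription-provider/example"

    let details = try await linuxSubs.getRegisteredSubscriptionProvider(
        .init(subscriptionProviderArn: providerArn)
    )
    print(details.subscriptionProviderStatus?.rawValue ?? "unknown status")

    _ = try await linuxSubs.deregisterSubscriptionProvider(
        .init(subscriptionProviderArn: providerArn)
    )
}
```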

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.licensemanagerlinuxsubscriptions#GetServiceSettings": { "type": "operation", "input": { @@ -59,7 +204,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the Linux subscriptions service settings.

", + "smithy.api#documentation": "

Lists the Linux subscriptions service settings for your account.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -167,7 +312,37 @@ "SubscriptionName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of the subscription being used by the instance.

" + "smithy.api#documentation": "

The name of the license subscription that the instance uses.

" + } + }, + "OsVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The operating system software version that runs on your instance.

" + } + }, + "SubscriptionProviderCreateTime": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The timestamp when you registered the third-party Linux subscription \n\t\t\tprovider for the subscription that the instance uses.

" + } + }, + "SubscriptionProviderUpdateTime": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The timestamp from the last time that the instance synced with the registered \n\t\t\tthird-party Linux subscription provider.

" + } + }, + "DualSubscription": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Indicates that you have two different license subscriptions for \n\t\t\tthe same software on your instance.

" + } + }, + "RegisteredWithSubscriptionProvider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Indicates that your instance uses a BYOL license subscription from \n\t\t\ta third-party Linux subscription provider that you've registered with License Manager.

" } } }, @@ -197,6 +372,12 @@ "type": "service", "version": "2018-05-10", "operations": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#DeregisterSubscriptionProvider" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#GetRegisteredSubscriptionProvider" + }, { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#GetServiceSettings" }, @@ -206,6 +387,21 @@ { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListLinuxSubscriptions" }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProviders" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResource" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProvider" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#TagResource" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResource" + }, { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#UpdateServiceSettings" } @@ -227,15 +423,7 @@ "x-amz-content-sha256", "x-amz-user-agent", "x-amzn-platform-id", - "x-amzn-trace-id", - "content-length", - "x-api-key", - "authorization", - "x-amz-date", - "x-amz-security-token", - "Access-Control-Allow-Headers", - "Access-Control-Allow-Methods", - "Access-Control-Allow-Origin" + "x-amzn-trace-id" ], "additionalExposedHeaders": [ "x-amzn-errortype", @@ -287,7 +475,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -306,7 +493,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -334,13 +520,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -353,7 +540,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -367,7 +553,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -390,7 +575,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -425,11 +609,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -440,16 +622,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -463,14 +648,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -479,15 +662,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -498,16 +680,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -521,7 +706,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -541,11 +725,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -556,20 +738,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -580,18 +764,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" 
} - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -991,19 +1179,19 @@ "Filters": { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#FilterList", "traits": { - "smithy.api#documentation": "

An array of structures that you can use to filter the results to those that match one or more sets of key-value pairs that you specify. For example, you can filter by the name of AmiID with an optional operator to see subscriptions that match, partially match, or don't match a certain Amazon Machine Image (AMI) ID. The valid names for this filter are: AmiID, InstanceID, AccountID, Status, Region, UsageOperation, ProductCode, InstanceType. The valid Operators for this filter are: contains, equals, Notequal.
" + "smithy.api#documentation": "

An array of structures that you can use to filter the results by your specified criteria. For example, you can specify Region in the Name, with the contains operator to list all subscriptions that match a partial string in the Value, such as us-west. For each filter, you can specify one of the following values for the Name key to streamline results: AccountID, AmiID, DualSubscription, InstanceID, InstanceType, ProductCode, Region, Status, UsageOperation. For each filter, you can use one of the following Operator values to define the behavior of the filter: contains, equals, Notequal.
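Because this model feeds the generated Soto client, a short illustration may help: the sketch below builds one of these filters in Swift and passes it on a list request. It is only a sketch under assumptions, not verified generator output; the LicenseManagerLinuxSubscriptions service type, the Filter member names (name, operator, values), the Operator enum case, and the listLinuxSubscriptionInstances call are all assumed from Soto's usual naming conventions.

import SotoCore
import SotoLicenseManagerLinuxSubscriptions

// Hedged sketch: filter Linux subscription instances by a partial Region match.
// All shape, member, and enum-case names are assumptions based on Soto's usual
// codegen (lowerCamelCase members, operation-named methods).
func listUsWestSubscriptionInstances(using client: AWSClient) async throws {
    let service = LicenseManagerLinuxSubscriptions(client: client)
    let regionFilter = LicenseManagerLinuxSubscriptions.Filter(
        name: "Region",            // one of the valid Name values listed above
        `operator`: .contains,     // contains | equals | Notequal; case name assumed
        values: ["us-west"]
    )
    let response = try await service.listLinuxSubscriptionInstances(
        .init(filters: [regionFilter], maxResults: 50)
    )
    print(response)
}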
" } }, "MaxResults": { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#BoxInteger", "traits": { - "smithy.api#documentation": "

Maximum number of results to return in a single call.

" + "smithy.api#documentation": "

The maximum items to return in a request.

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Token for the next set of results.

", + "smithy.api#documentation": "

A token to specify where to start paginating. This \n\tis the nextToken from a previously truncated response.

", "smithy.api#length": { "min": 1, "max": 16384 @@ -1027,7 +1215,7 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Token for the next set of results.

" + "smithy.api#documentation": "

The next token used for paginated responses. When this \n\tfield isn't empty, there are additional elements that the service hasn't \n\tincluded in this request. Use this token with the next request to retrieve \n\tadditional objects.

" } } } @@ -1079,13 +1267,13 @@ "MaxResults": { "target": "com.amazonaws.licensemanagerlinuxsubscriptions#BoxInteger", "traits": { - "smithy.api#documentation": "

Maximum number of results to return in a single call.

" + "smithy.api#documentation": "

The maximum items to return in a request.

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Token for the next set of results.

", + "smithy.api#documentation": "

A token to specify where to start paginating. This \n\tis the nextToken from a previously truncated response.

", "smithy.api#length": { "min": 1, "max": 16384 @@ -1109,9 +1297,152 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Token for the next set of results.

" + "smithy.api#documentation": "

The next token used for paginated responses. When this \n\tfield isn't empty, there are additional elements that the service hasn't \n\tincluded in this request. Use this token with the next request to retrieve \n\tadditional objects.

" + } + } + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProviders": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProvidersRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProvidersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ThrottlingException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List Bring Your Own License (BYOL) subscription registration resources for your account.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/subscription/ListRegisteredSubscriptionProviders" + }, + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "RegisteredSubscriptionProviders" + } + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProvidersRequest": { + "type": "structure", + "members": { + "SubscriptionProviderSources": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSourceList", + "traits": { + "smithy.api#documentation": "

To filter your results, specify which subscription providers to return \n\t\t\tin the list.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#BoxInteger", + "traits": { + "smithy.api#documentation": "

The maximum items to return in a request.

", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A token to specify where to start paginating. This \n\tis the nextToken from a previously truncated response.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListRegisteredSubscriptionProvidersResponse": { + "type": "structure", + "members": { + "RegisteredSubscriptionProviders": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#RegisteredSubscriptionProviderList", + "traits": { + "smithy.api#documentation": "

The list of BYOL registration resources that fit the criteria \n\t\t\tyou specified in the request.

" + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next token used for paginated responses. When this \n\tfield isn't empty, there are additional elements that the service hasn't \n\tincluded in this request. Use this token with the next request to retrieve \n\tadditional objects.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List the metadata tags that are assigned to the \n\t\t\tspecified Amazon Web Services resource.

", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which to list metadata tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#Tags", + "traits": { + "smithy.api#documentation": "

The metadata tags for the requested resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.licensemanagerlinuxsubscriptions#Operator": { @@ -1163,6 +1494,157 @@ "target": "smithy.api#String" } }, + "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProviderRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ThrottlingException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Register the supported third-party subscription provider for your Bring Your Own License (BYOL) subscription.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/subscription/RegisterSubscriptionProvider" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProviderRequest": { + "type": "structure", + "members": { + "SubscriptionProviderSource": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource", + "traits": { + "smithy.api#documentation": "

The supported Linux subscription provider to register.

", + "smithy.api#required": {} + } + }, + "SecretArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret where you've stored your subscription provider's access token. For \n\t\t\tRHEL subscriptions managed through the Red Hat Subscription Manager (RHSM), the secret contains \n\t\t\tyour Red Hat Offline token.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#Tags", + "traits": { + "smithy.api#documentation": "

The metadata tags to assign to your registered Linux subscription provider \n\t\t\tresource.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#RegisterSubscriptionProviderResponse": { + "type": "structure", + "members": { + "SubscriptionProviderSource": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource", + "traits": { + "smithy.api#documentation": "

The Linux subscription provider that you registered.

" + } + }, + "SubscriptionProviderArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered.

" + } + }, + "SubscriptionProviderStatus": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderStatus", + "traits": { + "smithy.api#documentation": "

Indicates the status of the registration action for the Linux subscription provider \n\t\t\tthat you requested.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#RegisteredSubscriptionProvider": { + "type": "structure", + "members": { + "SubscriptionProviderArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Linux subscription provider resource that you registered.

" + } + }, + "SubscriptionProviderSource": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource", + "traits": { + "smithy.api#documentation": "

A supported third-party Linux subscription provider. License Manager currently supports \n\t\t\tRed Hat subscriptions.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SecretArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Secrets Manager secret that stores your registered Linux subscription provider \n\t\t\taccess token. For RHEL account subscriptions, this is the offline token.

" + } + }, + "SubscriptionProviderStatus": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderStatus", + "traits": { + "smithy.api#documentation": "

Indicates the status of your registered Linux subscription provider access token \n\t\t\tfrom the last time License Manager retrieved subscription data. For RHEL account subscriptions, \n\t\t\tthis is the status of the offline token.

" + } + }, + "SubscriptionProviderStatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A detailed message that's associated with your BYOL subscription \n\t\t\tprovider token status.

" + } + }, + "LastSuccessfulDataRetrievalTime": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The timestamp from the last time that License Manager accessed third-party subscription data \n\t\t\tfor your account from your registered Linux subscription provider.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A third-party provider for operating system (OS) platform software and license \n\t\t\tsubscriptions, such as Red Hat. When you register a third-party Linux subscription \n\t\t\tprovider, License Manager can get subscription data from the registered provider.

" + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#RegisteredSubscriptionProviderList": { + "type": "list", + "member": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#RegisteredSubscriptionProvider" + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Unable to find the requested Amazon Web Services resource.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#SecretArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[a-z0-9-\\.]{1,63}:secretsmanager:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:secret:[^/]{1,1023}$" + } + }, "com.amazonaws.licensemanagerlinuxsubscriptions#Status": { "type": "string", "traits": { @@ -1250,6 +1732,139 @@ "target": "com.amazonaws.licensemanagerlinuxsubscriptions#Subscription" } }, + "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{1,510}/[a-z0-9-\\.]{1,510}$" + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "RedHat", + "value": "RedHat", + "documentation": "RedHat subscription provider namespace" + } + ] + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderSource" + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "ACTIVE", + "value": "ACTIVE", + "documentation": "ACTIVE status" + }, + { + "name": "INVALID", + "value": "INVALID", + "documentation": "INVALID status" + }, + { + "name": "PENDING", + "value": "PENDING", + "documentation": "PENDING status" + } + ] + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#TagKeyList": { + "type": "list", + "member": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Add metadata tags to the specified Amazon Web Services resource.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services resource to which to add the specified \n\t\t\tmetadata tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#Tags", + "traits": { + "smithy.api#documentation": "

The metadata tags to assign to the Amazon Web Services resource. Tags are \n\t\t\tformatted as key value pairs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#Tags": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.licensemanagerlinuxsubscriptions#ThrottlingException": { "type": "structure", "members": { @@ -1262,6 +1877,62 @@ "smithy.api#error": "client" } }, + "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#InternalServerException" + }, + { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Remove one or more metadata tags from the specified Amazon Web Services resource.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#SubscriptionProviderArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services resource to remove the metadata tags from.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.licensemanagerlinuxsubscriptions#TagKeyList", + "traits": { + "smithy.api#documentation": "

A list of metadata tag keys to remove from the requested \n\t\t\tresource.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.licensemanagerlinuxsubscriptions#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.licensemanagerlinuxsubscriptions#UpdateServiceSettings": { "type": "operation", "input": { diff --git a/models/mediaconnect.json b/models/mediaconnect.json index 84998e7d71..5f362e56e9 100644 --- a/models/mediaconnect.json +++ b/models/mediaconnect.json @@ -991,6 +991,13 @@ "smithy.api#documentation": "The name of the VPC interface attachment to use for this output.", "smithy.api#jsonName": "vpcInterfaceAttachment" } + }, + "OutputStatus": { + "target": "com.amazonaws.mediaconnect#OutputStatus", + "traits": { + "smithy.api#documentation": "An indication of whether the new output should be enabled or disabled as soon as it is created. If you don't specify the outputStatus field in your request, MediaConnect sets it to ENABLED.", + "smithy.api#jsonName": "outputStatus" + } } }, "traits": { @@ -5326,6 +5333,9 @@ "name": "mediaconnect" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "API for AWS Elemental MediaConnect", "smithy.api#title": "AWS MediaConnect", "smithy.rules#endpointRuleSet": { @@ -6714,12 +6724,36 @@ "smithy.api#documentation": "The bridge output ports currently in use.", "smithy.api#jsonName": "bridgePorts" } + }, + "OutputStatus": { + "target": "com.amazonaws.mediaconnect#OutputStatus", + "traits": { + "smithy.api#documentation": "An indication of whether the output is transmitting data or not.", + "smithy.api#jsonName": "outputStatus" + } } }, "traits": { "smithy.api#documentation": "The settings for an output." } }, + "com.amazonaws.mediaconnect#OutputStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.mediaconnect#PriceUnits": { "type": "enum", "members": { @@ -9719,6 +9753,13 @@ "smithy.api#documentation": "The name of the VPC interface attachment to use for this output.", "smithy.api#jsonName": "vpcInterfaceAttachment" } + }, + "OutputStatus": { + "target": "com.amazonaws.mediaconnect#OutputStatus", + "traits": { + "smithy.api#documentation": "An indication of whether the output should transmit data or not. 
If you don't specify the outputStatus field in your request, MediaConnect leaves the value unchanged.", + "smithy.api#jsonName": "outputStatus" + } } }, "traits": { diff --git a/models/medialive.json b/models/medialive.json index 319afa1374..2a9c638ff4 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -654,6 +654,32 @@ "smithy.api#documentation": "Afd Signaling" } }, + "com.amazonaws.medialive#Algorithm": { + "type": "enum", + "members": { + "AES128": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AES128" + } + }, + "AES192": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AES192" + } + }, + "AES256": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AES256" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for Algorithm" + } + }, "com.amazonaws.medialive#AncillarySourceSettings": { "type": "structure", "members": { @@ -4969,6 +4995,13 @@ "traits": { "smithy.api#jsonName": "vpc" } + }, + "SrtSettings": { + "target": "com.amazonaws.medialive#SrtSettingsRequest", + "traits": { + "smithy.api#documentation": "The settings associated with an SRT input.", + "smithy.api#jsonName": "srtSettings" + } } }, "traits": { @@ -7907,6 +7940,13 @@ "traits": { "smithy.api#jsonName": "type" } + }, + "SrtSettings": { + "target": "com.amazonaws.medialive#SrtSettings", + "traits": { + "smithy.api#documentation": "The settings associated with an SRT input.", + "smithy.api#jsonName": "srtSettings" + } } }, "traits": { @@ -9720,7 +9760,7 @@ "Bitrate": { "target": "com.amazonaws.medialive#__double", "traits": { - "smithy.api#documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode.\n// * @affectsRightSizing true", + "smithy.api#documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode.", "smithy.api#jsonName": "bitrate" } }, @@ -13030,7 +13070,7 @@ "FilterSettings": { "target": "com.amazonaws.medialive#H264FilterSettings", "traits": { - "smithy.api#documentation": "Optional filters that you can apply to an encode.", + "smithy.api#documentation": "Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We\nrecommend that you try both filters and observe the results to decide which one to use.\n\nThe Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual\nfiltering and motion compensated temporal filtering (MCTF). It operates independently of the compression level.\n\nThe Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current\ncompression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower.", "smithy.api#jsonName": "filterSettings" } }, @@ -13851,7 +13891,7 @@ "FilterSettings": { "target": "com.amazonaws.medialive#H265FilterSettings", "traits": { - "smithy.api#documentation": "Optional filters that you can apply to an encode.", + "smithy.api#documentation": "Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We\nrecommend that you try both filters and observe the results to decide which one to use.\n\nThe Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual\nfiltering and motion compensated temporal filtering (MCTF). 
It operates independently of the compression level.\n\nThe Bandwidth Reduction filter is a perceptual filter located within the encoding loop. It adapts to the current\ncompression level to filter imperceptible signals. This filter works only when the resolution is 1080p or lower.", "smithy.api#jsonName": "filterSettings" } }, @@ -15587,6 +15627,13 @@ "traits": { "smithy.api#jsonName": "type" } + }, + "SrtSettings": { + "target": "com.amazonaws.medialive#SrtSettings", + "traits": { + "smithy.api#documentation": "The settings associated with an SRT input.", + "smithy.api#jsonName": "srtSettings" + } } }, "traits": { @@ -17464,6 +17511,12 @@ "traits": { "smithy.api#enumValue": "TS_FILE" } + }, + "SRT_CALLER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SRT_CALLER" + } } }, "traits": { @@ -20553,6 +20606,9 @@ "name": "medialive" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "API for AWS Elemental MediaLive", "smithy.api#title": "AWS Elemental MediaLive", "smithy.rules#endpointRuleSet": { @@ -26180,6 +26236,162 @@ "smithy.api#documentation": "Smpte Tt Destination Settings" } }, + "com.amazonaws.medialive#SrtCallerDecryption": { + "type": "structure", + "members": { + "Algorithm": { + "target": "com.amazonaws.medialive#Algorithm", + "traits": { + "smithy.api#documentation": "The algorithm used to encrypt content.", + "smithy.api#jsonName": "algorithm" + } + }, + "PassphraseSecretArn": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. The secret holds the passphrase that MediaLive uses to decrypt the source content.", + "smithy.api#jsonName": "passphraseSecretArn" + } + } + }, + "traits": { + "smithy.api#documentation": "The decryption settings for the SRT caller source. Present only if the source has decryption enabled." + } + }, + "com.amazonaws.medialive#SrtCallerDecryptionRequest": { + "type": "structure", + "members": { + "Algorithm": { + "target": "com.amazonaws.medialive#Algorithm", + "traits": { + "smithy.api#documentation": "The algorithm used to encrypt content.", + "smithy.api#jsonName": "algorithm" + } + }, + "PassphraseSecretArn": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The ARN for the secret in Secrets Manager. Someone in your organization must create a secret and provide you with its ARN. This secret holds the passphrase that MediaLive will use to decrypt the source content.", + "smithy.api#jsonName": "passphraseSecretArn" + } + } + }, + "traits": { + "smithy.api#documentation": "Complete these parameters only if the content is encrypted." + } + }, + "com.amazonaws.medialive#SrtCallerSource": { + "type": "structure", + "members": { + "Decryption": { + "target": "com.amazonaws.medialive#SrtCallerDecryption", + "traits": { + "smithy.api#jsonName": "decryption" + } + }, + "MinimumLatency": { + "target": "com.amazonaws.medialive#__integer", + "traits": { + "smithy.api#documentation": "The preferred latency (in milliseconds) for implementing packet loss and recovery. 
Packet recovery is a key feature of SRT.", + "smithy.api#jsonName": "minimumLatency" + } + }, + "SrtListenerAddress": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The IP address at the upstream system (the listener) that MediaLive (the caller) connects to.", + "smithy.api#jsonName": "srtListenerAddress" + } + }, + "SrtListenerPort": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The port at the upstream system (the listener) that MediaLive (the caller) connects to.", + "smithy.api#jsonName": "srtListenerPort" + } + }, + "StreamId": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The stream ID, if the upstream system uses this identifier.", + "smithy.api#jsonName": "streamId" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration for a source that uses SRT as the connection protocol. In terms of establishing the connection, MediaLive is always caller and the upstream system is always the listener. In terms of transmission of the source content, MediaLive is always the receiver and the upstream system is always the sender." + } + }, + "com.amazonaws.medialive#SrtCallerSourceRequest": { + "type": "structure", + "members": { + "Decryption": { + "target": "com.amazonaws.medialive#SrtCallerDecryptionRequest", + "traits": { + "smithy.api#jsonName": "decryption" + } + }, + "MinimumLatency": { + "target": "com.amazonaws.medialive#__integer", + "traits": { + "smithy.api#documentation": "The preferred latency (in milliseconds) for implementing packet loss and recovery. Packet recovery is a key feature of SRT. Obtain this value from the operator at the upstream system.", + "smithy.api#jsonName": "minimumLatency" + } + }, + "SrtListenerAddress": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The IP address at the upstream system (the listener) that MediaLive (the caller) will connect to.", + "smithy.api#jsonName": "srtListenerAddress" + } + }, + "SrtListenerPort": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The port at the upstream system (the listener) that MediaLive (the caller) will connect to.", + "smithy.api#jsonName": "srtListenerPort" + } + }, + "StreamId": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "This value is required if the upstream system uses this identifier because without it, the SRT handshake between MediaLive (the caller) and the upstream system (the listener) might fail.", + "smithy.api#jsonName": "streamId" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures the connection for a source that uses SRT as the connection protocol. In terms of establishing the connection, MediaLive is always the caller and the upstream system is always the listener. In terms of transmission of the source content, MediaLive is always the receiver and the upstream system is always the sender." + } + }, + "com.amazonaws.medialive#SrtSettings": { + "type": "structure", + "members": { + "SrtCallerSources": { + "target": "com.amazonaws.medialive#__listOfSrtCallerSource", + "traits": { + "smithy.api#jsonName": "srtCallerSources" + } + } + }, + "traits": { + "smithy.api#documentation": "The configured sources for this SRT input." 
+ } + }, + "com.amazonaws.medialive#SrtSettingsRequest": { + "type": "structure", + "members": { + "SrtCallerSources": { + "target": "com.amazonaws.medialive#__listOfSrtCallerSourceRequest", + "traits": { + "smithy.api#jsonName": "srtCallerSources" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures the sources for this SRT input. For a single-pipeline input, include one srtCallerSource in the array. For a standard-pipeline input, include two srtCallerSource." + } + }, "com.amazonaws.medialive#StandardHlsSettings": { "type": "structure", "members": { @@ -30027,6 +30239,13 @@ "smithy.api#documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.", "smithy.api#jsonName": "sources" } + }, + "SrtSettings": { + "target": "com.amazonaws.medialive#SrtSettingsRequest", + "traits": { + "smithy.api#documentation": "The settings associated with an SRT input.", + "smithy.api#jsonName": "srtSettings" + } } }, "traits": { @@ -32033,6 +32252,24 @@ "smithy.api#documentation": "Placeholder documentation for __listOfSignalMapSummary" } }, + "com.amazonaws.medialive#__listOfSrtCallerSource": { + "type": "list", + "member": { + "target": "com.amazonaws.medialive#SrtCallerSource" + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for __listOfSrtCallerSource" + } + }, + "com.amazonaws.medialive#__listOfSrtCallerSourceRequest": { + "type": "list", + "member": { + "target": "com.amazonaws.medialive#SrtCallerSourceRequest" + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for __listOfSrtCallerSourceRequest" + } + }, "com.amazonaws.medialive#__listOfThumbnail": { "type": "list", "member": { diff --git a/models/opensearch.json b/models/opensearch.json index c20cf2b097..a04daec615 100644 --- a/models/opensearch.json +++ b/models/opensearch.json @@ -29,6 +29,51 @@ ] }, "shapes": { + "com.amazonaws.opensearch#AIMLOptionsInput": { + "type": "structure", + "members": { + "NaturalLanguageQueryGenerationOptions": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsInput", + "traits": { + "smithy.api#documentation": "
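To ground the new MediaLive SRT shapes, here is a hedged Swift sketch of creating a single-pipeline SRT caller input. Everything below (the SotoMediaLive types, member casing, and the .srtCaller and .aes256 case names) is assumed from Soto's usual codegen and the model text above, not verified against generated code; the listener address, port, and stream ID are illustrative placeholders.

import SotoCore
import SotoMediaLive

// Hedged sketch: create an SRT caller input whose source content is AES-256 encrypted.
// A single-pipeline input supplies one SrtCallerSourceRequest; a standard-pipeline
// input would supply two, per the SrtSettingsRequest documentation above.
func createSrtCallerInput(using client: AWSClient, passphraseSecretArn: String) async throws {
    let mediaLive = MediaLive(client: client, region: .useast1)
    let caller = MediaLive.SrtCallerSourceRequest(
        decryption: .init(algorithm: .aes256, passphraseSecretArn: passphraseSecretArn),
        minimumLatency: 2000,                 // milliseconds; obtain from the upstream operator
        srtListenerAddress: "203.0.113.10",   // placeholder listener address
        srtListenerPort: "5001",              // placeholder listener port
        streamId: "studio-feed-1"             // only if the upstream system uses stream IDs
    )
    let response = try await mediaLive.createInput(.init(
        name: "srt-caller-example",
        srtSettings: .init(srtCallerSources: [caller]),
        type: .srtCaller
    ))
    print(response)
}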

Container for parameters required for natural language query generation on the specified domain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } + }, + "com.amazonaws.opensearch#AIMLOptionsOutput": { + "type": "structure", + "members": { + "NaturalLanguageQueryGenerationOptions": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsOutput", + "traits": { + "smithy.api#documentation": "

Container for parameters required for natural language query generation on the specified domain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters representing the state of machine learning features on the specified domain.

" + } + }, + "com.amazonaws.opensearch#AIMLOptionsStatus": { + "type": "structure", + "members": { + "Options": { + "target": "com.amazonaws.opensearch#AIMLOptionsOutput", + "traits": { + "smithy.api#documentation": "

Machine learning options on the specified domain.

" + } + }, + "Status": { + "target": "com.amazonaws.opensearch#OptionStatus" + } + }, + "traits": { + "smithy.api#documentation": "

The status of machine learning options on the specified domain.

" + } + }, "com.amazonaws.opensearch#ARN": { "type": "string", "traits": { @@ -3318,6 +3363,12 @@ "traits": { "smithy.api#documentation": "

Software update options for the domain.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsInput", + "traits": { + "smithy.api#documentation": "

Options for all machine learning features for the specified domain.

" + } } }, "traits": { @@ -5710,6 +5761,12 @@ "traits": { "smithy.api#documentation": "

Information about the domain properties that are currently being modified.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsStatus", + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } } }, "traits": { @@ -6386,6 +6443,12 @@ "traits": { "smithy.api#documentation": "

Information about the domain properties that are currently being modified.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsOutput", + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } } }, "traits": { @@ -9036,6 +9099,104 @@ "target": "com.amazonaws.opensearch#ModifyingProperties" } }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationCurrentState": { + "type": "enum", + "members": { + "NotEnabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_ENABLED" + } + }, + "EnableComplete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_COMPLETE" + } + }, + "EnableInProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_IN_PROGRESS" + } + }, + "EnableFailed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_FAILED" + } + }, + "DisableComplete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_COMPLETE" + } + }, + "DisableInProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_IN_PROGRESS" + } + }, + "DisableFailed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_FAILED" + } + } + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState": { + "type": "enum", + "members": { + "Enabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "Disabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsInput": { + "type": "structure", + "members": { + "DesiredState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState", + "traits": { + "smithy.api#documentation": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable the natural language query generation feature.

" + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsOutput": { + "type": "structure", + "members": { + "DesiredState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState", + "traits": { + "smithy.api#documentation": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + }, + "CurrentState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationCurrentState", + "traits": { + "smithy.api#documentation": "

The current state of the natural language query generation feature, indicating completion, in progress, or failure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters representing the state of the natural language query generation feature on the specified domain.

" + } + }, "com.amazonaws.opensearch#NextToken": { "type": "string", "traits": { @@ -12221,6 +12382,12 @@ "traits": { "smithy.api#documentation": "

Service software update options for the domain.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsInput", + "traits": { + "smithy.api#documentation": "

Options for all machine learning features for the specified domain.

" + } } }, "traits": { diff --git a/models/pinpoint.json b/models/pinpoint.json index 5dfd2d90a1..88844d00aa 100644 --- a/models/pinpoint.json +++ b/models/pinpoint.json @@ -8034,7 +8034,22 @@ "method": "GET", "uri": "/v1/apps/{ApplicationId}", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetAppFailure", + "params": { + "ApplicationId": "InvalidApplicationId" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.pinpoint#GetAppRequest": { @@ -8286,7 +8301,20 @@ "method": "GET", "uri": "/v1/apps", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetAppsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.pinpoint#GetAppsRequest": { @@ -15202,6 +15230,9 @@ "name": "mobiletargeting" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "

Doc Engage API - Amazon Pinpoint API

", "smithy.api#title": "Amazon Pinpoint", "smithy.rules#endpointRuleSet": { diff --git a/models/qapps.json b/models/qapps.json new file mode 100644 index 0000000000..ace21fd4a3 --- /dev/null +++ b/models/qapps.json @@ -0,0 +1,5254 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.qapps#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The client is not authorized to perform the requested operation.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.qapps#AmazonResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + } + } + }, + "com.amazonaws.qapps#AppArn": { + "type": "string" + }, + "com.amazonaws.qapps#AppDefinition": { + "type": "structure", + "members": { + "appDefinitionVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the app definition schema or specification.

", + "smithy.api#required": {} + } + }, + "cards": { + "target": "com.amazonaws.qapps#CardModelList", + "traits": { + "smithy.api#documentation": "

The cards that make up the Q App, such as text input, file upload, or query cards.

", + "smithy.api#required": {} + } + }, + "canEdit": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating whether the Q App's definition can be edited by the user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of the Q App, specifying the cards and flow.

" + } + }, + "com.amazonaws.qapps#AppDefinitionInput": { + "type": "structure", + "members": { + "cards": { + "target": "com.amazonaws.qapps#CardList", + "traits": { + "smithy.api#documentation": "

The cards that make up the Q App definition.

", + "smithy.api#required": {} + } + }, + "initialPrompt": { + "target": "com.amazonaws.qapps#InitialPrompt", + "traits": { + "smithy.api#documentation": "

The initial prompt displayed when the Q App is started.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The input for defining a Q App.

" + } + }, + "com.amazonaws.qapps#AppRequiredCapabilities": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#AppRequiredCapability" + } + }, + "com.amazonaws.qapps#AppRequiredCapability": { + "type": "enum", + "members": { + "FILE_UPLOAD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FileUpload" + } + }, + "CREATOR_MODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CreatorMode" + } + }, + "RETRIEVAL_MODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RetrievalMode" + } + }, + "PLUGIN_MODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PluginMode" + } + } + } + }, + "com.amazonaws.qapps#AppStatus": { + "type": "enum", + "members": { + "PUBLISHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHED" + } + }, + "DRAFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DRAFT" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETED" + } + } + } + }, + "com.amazonaws.qapps#AppVersion": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 2147483647 + } + } + }, + "com.amazonaws.qapps#AssociateLibraryItemReview": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#AssociateLibraryItemReviewInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a rating or review for a library item with the user submitting \n the request. This increments the rating count for the specified library item.

", + "smithy.api#examples": [ + { + "title": "Increase the rating counter by 1 for the related app for this user", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/catalog.associateItemRating" + } + } + }, + "com.amazonaws.qapps#AssociateLibraryItemReviewInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item to associate the review with.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#AssociateQAppWithUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#AssociateQAppWithUserInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation creates a link between the user's identity calling the operation and a\n specific Q App. This is useful to mark the Q App as a favorite for\n the user if the user doesn't own the Amazon Q App so they can still run it and see it in their\n inventory of Q Apps.

", + "smithy.api#examples": [ + { + "title": "Links an Amazon Q App to the invoker's list of apps", + "input": { + "appId": "393e77fb-0a30-4f47-ad30-75d71aeaed8a", + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.install" + } + } + }, + "com.amazonaws.qapps#AssociateQAppWithUserInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Q App to associate with the user.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#AttributeFilter": { + "type": "structure", + "members": { + "andAllFilters": { + "target": "com.amazonaws.qapps#AttributeFilters", + "traits": { + "smithy.api#documentation": "

Performs a logical AND operation on all supplied filters.

" + } + }, + "orAllFilters": { + "target": "com.amazonaws.qapps#AttributeFilters", + "traits": { + "smithy.api#documentation": "

Performs a logical OR operation on all supplied filters.

" + } + }, + "notFilter": { + "target": "com.amazonaws.qapps#AttributeFilter", + "traits": { + "smithy.api#documentation": "

Performs a logical NOT operation on all supplied filters.

" + } + }, + "equalsTo": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Performs an equals operation on two document attributes or metadata fields. Supported\n for the following document attribute value types: dateValue,\n longValue, stringListValue and\n stringValue.

" + } + }, + "containsAll": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Returns true when a document contains all the specified document\n attributes or metadata fields. Supported for the following document attribute value types:\n stringListValue.

" + } + }, + "containsAny": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Returns true when a document contains any of the specified document\n attributes or metadata fields. Supported for the following document attribute value types:\n stringListValue.

" + } + }, + "greaterThan": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Performs a greater than operation on two document attributes or metadata fields.\n Supported for the following document attribute value types: dateValue\n and longValue.

" + } + }, + "greaterThanOrEquals": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Performs a greater than or equals operation on two document attributes or metadata\n fields. Supported for the following document attribute value types: dateValue\n and longValue.

" + } + }, + "lessThan": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Performs a less than operation on two document attributes or metadata fields.\n Supported for the following document attribute value types: dateValue\n and longValue.

" + } + }, + "lessThanOrEquals": { + "target": "com.amazonaws.qapps#DocumentAttribute", + "traits": { + "smithy.api#documentation": "

Performs a less than or equals operation on two document attributes or metadata\n fields. Supported for the following document attribute value types: dateValue\n and longValue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The filter criteria used on responses based on document attributes or metadata fields.

" + } + }, + "com.amazonaws.qapps#AttributeFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#AttributeFilter" + } + }, + "com.amazonaws.qapps#Card": { + "type": "union", + "members": { + "textInput": { + "target": "com.amazonaws.qapps#TextInputCard", + "traits": { + "smithy.api#documentation": "

A container for the properties of the text input card.

" + } + }, + "qQuery": { + "target": "com.amazonaws.qapps#QQueryCard", + "traits": { + "smithy.api#documentation": "

A container for the properties of the query card.

" + } + }, + "qPlugin": { + "target": "com.amazonaws.qapps#QPluginCard", + "traits": { + "smithy.api#documentation": "

A container for the properties of the plugin card.

" + } + }, + "fileUpload": { + "target": "com.amazonaws.qapps#FileUploadCard", + "traits": { + "smithy.api#documentation": "

A container for the properties of the file upload card.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A card representing a component or step in an Amazon Q App's flow.

" + } + }, + "com.amazonaws.qapps#CardInput": { + "type": "union", + "members": { + "textInput": { + "target": "com.amazonaws.qapps#TextInputCardInput", + "traits": { + "smithy.api#documentation": "

A container for the properties of the text input card.

" + } + }, + "qQuery": { + "target": "com.amazonaws.qapps#QQueryCardInput", + "traits": { + "smithy.api#documentation": "

A container for the properties of the query input card.

" + } + }, + "qPlugin": { + "target": "com.amazonaws.qapps#QPluginCardInput", + "traits": { + "smithy.api#documentation": "

A container for the properties of the plugin input card.

" + } + }, + "fileUpload": { + "target": "com.amazonaws.qapps#FileUploadCardInput", + "traits": { + "smithy.api#documentation": "

A container for the properties of the file upload input card.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The properties defining an input card in an Amazon Q App.

" + } + }, + "com.amazonaws.qapps#CardList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#CardInput" + }, + "traits": { + "smithy.api#length": { + "max": 20 + } + } + }, + "com.amazonaws.qapps#CardModelList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#Card" + }, + "traits": { + "smithy.api#length": { + "max": 20 + } + } + }, + "com.amazonaws.qapps#CardOutputSource": { + "type": "enum", + "members": { + "APPROVED_SOURCES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "approved-sources" + } + }, + "LLM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "llm" + } + } + } + }, + "com.amazonaws.qapps#CardStatus": { + "type": "structure", + "members": { + "currentState": { + "target": "com.amazonaws.qapps#ExecutionStatus", + "traits": { + "smithy.api#documentation": "

The current state of the card.

", + "smithy.api#required": {} + } + }, + "currentValue": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The current value or result associated with the card.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The current status and value of a card in an active Amazon Q App session.

" + } + }, + "com.amazonaws.qapps#CardStatusMap": { + "type": "map", + "key": { + "target": "com.amazonaws.qapps#UUID" + }, + "value": { + "target": "com.amazonaws.qapps#CardStatus" + } + }, + "com.amazonaws.qapps#CardType": { + "type": "enum", + "members": { + "TEXT_INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "text-input" + } + }, + "Q_QUERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "q-query" + } + }, + "FILE_UPLOAD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "file-upload" + } + }, + "Q_PLUGIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "q-plugin" + } + } + } + }, + "com.amazonaws.qapps#CardValue": { + "type": "structure", + "members": { + "cardId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the card.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value or result associated with the card.

", + "smithy.api#length": { + "max": 5000 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The value or result associated with a card in an Amazon Q App session.

" + } + }, + "com.amazonaws.qapps#CardValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#CardValue" + }, + "traits": { + "smithy.api#length": { + "max": 20 + } + } + }, + "com.amazonaws.qapps#Category": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the category.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The title or name of the category.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A category used to classify and filter library items for Amazon Q Apps.

" + } + }, + "com.amazonaws.qapps#CategoryIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#UUID" + }, + "traits": { + "smithy.api#length": { + "max": 3 + } + } + }, + "com.amazonaws.qapps#CategoryList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#Category" + }, + "traits": { + "smithy.api#length": { + "max": 3 + } + } + }, + "com.amazonaws.qapps#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the resource.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested operation could not be completed due to a \n conflict with the current state of the resource.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.qapps#ContentTooLargeException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the resource.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested operation could not be completed because \n the content exceeds the maximum allowed size.

", + "smithy.api#error": "client", + "smithy.api#httpError": 413 + } + }, + "com.amazonaws.qapps#ConversationMessage": { + "type": "structure", + "members": { + "body": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The text content of the conversation message.

", + "smithy.api#length": { + "max": 7000 + }, + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#Sender", + "traits": { + "smithy.api#documentation": "

The type of the conversation message.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A message in a conversation, used as input for generating an Amazon Q App definition.

" + } + }, + "com.amazonaws.qapps#CreateLibraryItem": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#CreateLibraryItemInput" + }, + "output": { + "target": "com.amazonaws.qapps#CreateLibraryItemOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new library item for an Amazon Q App, allowing it to be discovered and \n used by other allowed users.
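As a rough illustration of calling this operation through the SotoQApps module added in this release — assuming the generated client follows Soto's usual conventions (a QApps service struct, a CreateLibraryItemInput shape, and an async createLibraryItem(_:) method); the IDs are placeholders taken from the example that follows:

```swift
import SotoQApps

// Assumed setup: AWSClient() picks up default credentials; shut it down when finished.
let awsClient = AWSClient()
let qapps = QApps(client: awsClient, region: .uswest2)

// Publish version 6 of an existing Q App to the library under a single category.
let published = try await qapps.createLibraryItem(.init(
    appId: "7a11f34b-42d4-4bc8-b668-ae4a788dae1e",
    appVersion: 6,
    categories: ["9c871ed4-1c41-4065-aefe-321cd4b61cf8"],
    instanceId: "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f"
))
print(published.libraryItemId, published.status)
```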

", + "smithy.api#examples": [ + { + "title": "Create a Library Item", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "appId": "7a11f34b-42d4-4bc8-b668-ae4a788dae1e", + "appVersion": 6, + "categories": [ + "9c871ed4-1c41-4065-aefe-321cd4b61cf8" + ] + }, + "output": { + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316", + "status": "PUBLISHED", + "createdAt": "2024-05-21T23:17:27.350Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-21T23:17:27.350Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "ratingCount": 0 + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/catalog.createItem" + } + } + }, + "com.amazonaws.qapps#CreateLibraryItemInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q App to publish to the library.

", + "smithy.api#required": {} + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Amazon Q App to publish to the library.

", + "smithy.api#required": {} + } + }, + "categories": { + "target": "com.amazonaws.qapps#CategoryIdList", + "traits": { + "smithy.api#documentation": "

The categories to associate with the library item for easier discovery.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#CreateLibraryItemOutput": { + "type": "structure", + "members": { + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the new library item.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status of the new library item, such as \"Published\".

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who created the library item.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was last updated.

" + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the library item.

" + } + }, + "ratingCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of ratings the library item has received from users.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#CreateQApp": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#CreateQAppInput" + }, + "output": { + "target": "com.amazonaws.qapps#CreateQAppOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#ConflictException" + }, + { + "target": "com.amazonaws.qapps#ContentTooLargeException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new Amazon Q App based on the provided definition. The Q App definition specifies\n the cards and flow of the Q App. This operation also calculates the dependencies between the cards\n by inspecting the references in the prompts.
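A sketch of the "Color Palette Generator" example below, written against the assumed Soto code-generation output (CardInput rendered as an enum with .textInput/.qQuery cases, CardType cases .textInput/.qQuery, and a createQApp(_:) method); shape member names mirror the model shown here:

```swift
import SotoQApps

let qapps = QApps(client: AWSClient(), region: .uswest2)

// A definition with one text-input card feeding one Q-query card.
let definition = QApps.AppDefinitionInput(
    cards: [
        .textInput(.init(
            id: "4cf94d96-8819-45c2-98cc-58c56b35c72f",
            title: "Color Base",
            type: .textInput
        )),
        .qQuery(.init(
            id: "18870b94-1e63-40e0-8c12-669c90ac5acc",
            prompt: "Recommend a list of colors that go well with @4cf94d96-8819-45c2-98cc-58c56b35c72f",
            title: "Recommended Palette",
            type: .qQuery
        ))
    ],
    initialPrompt: "Create an app that recommends a list of colors based on input."
)

let created = try await qapps.createQApp(.init(
    appDefinition: definition,
    instanceId: "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f",   // Q Business instance ID
    title: "Color Palette Generator"
))
print(created.appId, created.appVersion, created.status)
```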

", + "smithy.api#examples": [ + { + "title": "A basic application with 1 text input card and 1 output card", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "title": "Color Palette Generator", + "appDefinition": { + "cards": [ + { + "textInput": { + "type": "text-input", + "title": "Color Base", + "id": "4cf94d96-8819-45c2-98cc-58c56b35c72f" + } + }, + { + "qQuery": { + "type": "q-query", + "title": "Recommended Palette", + "id": "18870b94-1e63-40e0-8c12-669c90ac5acc", + "prompt": "Recommend me a list of colors that go well with @4cf94d96-8819-45c2-98cc-58c56b35c72f" + } + } + ], + "initialPrompt": "Create an app that recommend a list of colors based on input." + } + }, + "output": { + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "appArn": "arn:aws:qapps:us-west-2:123456789012:app/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "title": "Color Palette Generator", + "initialPrompt": "Create an app that recommend a list of colors based on input.", + "appVersion": 1, + "status": "DRAFT", + "createdAt": "2024-05-14T00:11:54.232Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-14T00:13:26.168Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "requiredCapabilities": [ + "CreatorMode" + ] + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.create" + } + } + }, + "com.amazonaws.qapps#CreateQAppInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the new Q App.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The description of the new Q App.

" + } + }, + "appDefinition": { + "target": "com.amazonaws.qapps#AppDefinitionInput", + "traits": { + "smithy.api#documentation": "

The definition of the new Q App, specifying the cards and flow.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qapps#TagMap", + "traits": { + "smithy.api#documentation": "

Optional tags to associate with the new Q App.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#CreateQAppOutput": { + "type": "structure", + "members": { + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the new Q App.

", + "smithy.api#required": {} + } + }, + "appArn": { + "target": "com.amazonaws.qapps#AppArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the new Q App.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the new Q App.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The description of the new Q App.

" + } + }, + "initialPrompt": { + "target": "com.amazonaws.qapps#InitialPrompt", + "traits": { + "smithy.api#documentation": "

The initial prompt displayed when the Q App is started.

" + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the new Q App.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qapps#AppStatus", + "traits": { + "smithy.api#documentation": "

The status of the new Q App, such as \"Created\".

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who created the Q App.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was last updated.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the Q App.

", + "smithy.api#required": {} + } + }, + "requiredCapabilities": { + "target": "com.amazonaws.qapps#AppRequiredCapabilities", + "traits": { + "smithy.api#documentation": "

The capabilities required to run the Q App, such as file upload or third-party integrations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#Default": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 500 + } + } + }, + "com.amazonaws.qapps#DeleteLibraryItem": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#DeleteLibraryItemInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a library item for an Amazon Q App, removing it from the library \n so it can no longer be discovered or used by other users.

", + "smithy.api#examples": [ + { + "title": "Delete a library item", + "input": { + "instanceId": "3642ba81-344c-42fd-a480-9119a5a5f26b", + "libraryItemId": "72088fd4-78b6-43da-bfb8-8621323c3cfb" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/catalog.deleteItem" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qapps#DeleteLibraryItemInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#DeleteQApp": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#DeleteQAppInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Q App owned by the user. If the Q App was previously published to the library, it is also removed from the library.

", + "smithy.api#examples": [ + { + "title": "Delete an Amazon Q App", + "input": { + "appId": "393e77fb-0a30-4f47-ad30-75d71aeaed8a", + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.delete" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qapps#DeleteQAppInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#DependencyList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.qapps#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 500 + } + } + }, + "com.amazonaws.qapps#DisassociateLibraryItemReview": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#DisassociateLibraryItemReviewInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes a rating or review previously submitted by the user for a library item.

", + "smithy.api#examples": [ + { + "title": "Decrease the rating counter by 1 for the related app for this user", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/catalog.disassociateItemRating" + } + } + }, + "com.amazonaws.qapps#DisassociateLibraryItemReviewInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item to remove the review from.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#DisassociateQAppFromUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#DisassociateQAppFromUserInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates a Q App from a user, removing the user's access to run the\n Q App.

", + "smithy.api#examples": [ + { + "title": "Unlinks an Amazon Q App from the invoker's list of apps", + "input": { + "appId": "393e77fb-0a30-4f47-ad30-75d71aeaed8a", + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.uninstall" + } + } + }, + "com.amazonaws.qapps#DisassociateQAppFromUserInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App to disassociate from the user.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#DocumentAttribute": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.qapps#DocumentAttributeKey", + "traits": { + "smithy.api#documentation": "

The identifier for the attribute.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.qapps#DocumentAttributeValue", + "traits": { + "smithy.api#documentation": "

The value of the attribute.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A document attribute or metadata field.

" + } + }, + "com.amazonaws.qapps#DocumentAttributeKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_][a-zA-Z0-9_-]*$" + } + }, + "com.amazonaws.qapps#DocumentAttributeStringListValue": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#PlatoString" + } + }, + "com.amazonaws.qapps#DocumentAttributeStringValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + } + } + }, + "com.amazonaws.qapps#DocumentAttributeValue": { + "type": "union", + "members": { + "stringValue": { + "target": "com.amazonaws.qapps#DocumentAttributeStringValue", + "traits": { + "smithy.api#documentation": "

A string.

", + "smithy.api#length": { + "max": 2048 + } + } + }, + "stringListValue": { + "target": "com.amazonaws.qapps#DocumentAttributeStringListValue", + "traits": { + "smithy.api#documentation": "

A list of strings.

" + } + }, + "longValue": { + "target": "com.amazonaws.qapps#Long", + "traits": { + "smithy.api#documentation": "

A long integer value.

" + } + }, + "dateValue": { + "target": "com.amazonaws.qapps#Timestamp", + "traits": { + "smithy.api#documentation": "

A date expressed as an ISO 8601 string.

\n

It's important for the time zone to be included in the ISO 8601 date-time format. For\n example, 2012-03-25T12:30:10+01:00 is the ISO 8601 date-time format for March 25th 2012\n at 12:30PM (plus 10 seconds) in Central European Time.
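For reference, a minimal Foundation sketch that produces a date-time string in this shape, with the offset included:

```swift
import Foundation

// Format the current Date with an explicit +01:00 offset,
// matching the 2012-03-25T12:30:10+01:00 form described above.
let formatter = ISO8601DateFormatter()
formatter.timeZone = TimeZone(secondsFromGMT: 3600)
print(formatter.string(from: Date()))
```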

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The value of a document attribute. You can only provide one value for a document\n attribute.

" + } + }, + "com.amazonaws.qapps#DocumentScope": { + "type": "enum", + "members": { + "APPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPLICATION" + } + }, + "SESSION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SESSION" + } + } + } + }, + "com.amazonaws.qapps#ExecutionStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "WAITING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WAITING" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, + "com.amazonaws.qapps#FileUploadCard": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the file upload card.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the file upload card.

", + "smithy.api#required": {} + } + }, + "dependencies": { + "target": "com.amazonaws.qapps#DependencyList", + "traits": { + "smithy.api#documentation": "

Any dependencies or requirements for the file upload card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "filename": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the file being uploaded.

" + } + }, + "fileId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the file associated with the card.

" + } + }, + "allowOverride": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating if the user can override the default file for the upload card.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A card in an Amazon Q App that allows the user to upload a file.

" + } + }, + "com.amazonaws.qapps#FileUploadCardInput": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the file upload card.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the file upload card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#default": "file-upload", + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "filename": { + "target": "com.amazonaws.qapps#Filename", + "traits": { + "smithy.api#documentation": "

The default filename to use for the file upload card.

" + } + }, + "fileId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The identifier of a pre-uploaded file associated with the card.

" + } + }, + "allowOverride": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating if the user can override the default file for the upload card.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents a file upload card. It can optionally \n receive a filename and fileId to set a \n default file. If not received, the user must provide the file \n when the Q App runs.

" + } + }, + "com.amazonaws.qapps#Filename": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 100 + } + } + }, + "com.amazonaws.qapps#GetLibraryItem": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#GetLibraryItemInput" + }, + "output": { + "target": "com.amazonaws.qapps#GetLibraryItemOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves details about a library item for an Amazon Q App, including its metadata, \n categories, ratings, and usage statistics.

", + "smithy.api#examples": [ + { + "title": "Retrieve a library item", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "libraryItemId": "18cbebaa-196a-4aa5-a840-88d548e07f8f" + }, + "output": { + "libraryItemId": "18cbebaa-196a-4aa5-a840-88d548e07f8f", + "appId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "appVersion": 1, + "categories": [ + { + "id": "9c871ed4-1c41-4065-aefe-321cd4b61cf8", + "title": "HR" + }, + { + "id": "fdc4b483-c4e2-44c9-b4b2-6c850bbdb579", + "title": "General" + }, + { + "id": "c1c4e374-118c-446f-81fb-cba6225d88da", + "title": "IT" + } + ], + "status": "PUBLISHED", + "ratingCount": 0, + "isRatedByUser": false, + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "createdAt": "2024-05-08T16:09:56.080Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-08T16:09:56.080Z", + "userCount": 1 + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/catalog.getItem" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qapps#GetLibraryItemInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item to retrieve.

", + "smithy.api#httpQuery": "libraryItemId", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q App associated with the library item.

", + "smithy.api#httpQuery": "appId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#GetLibraryItemOutput": { + "type": "structure", + "members": { + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item.

", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "categories": { + "target": "com.amazonaws.qapps#CategoryList", + "traits": { + "smithy.api#documentation": "

The categories associated with the library item for discovery.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status of the library item, such as \"Published\".

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who created the library item.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was last updated.

" + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the library item.

" + } + }, + "ratingCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of ratings the library item has received from users.

", + "smithy.api#required": {} + } + }, + "isRatedByUser": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Whether the current user has rated the library item.

" + } + }, + "userCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of users who have associated the Q App with their account.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#GetQApp": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#GetQAppInput" + }, + "output": { + "target": "com.amazonaws.qapps#GetQAppOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the full details of a Q App, including its definition specifying the cards and flow.
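A brief sketch of fetching a Q App with the assumed generated client (GetQAppInput shape and getQApp(_:) method); the IDs are the placeholders from the example below:

```swift
import SotoQApps

let qapps = QApps(client: AWSClient(), region: .uswest2)

let app = try await qapps.getQApp(.init(
    appId: "3d110749-efc3-427c-87e8-15e966e5c168",
    instanceId: "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f"
))
print(app.title, app.appVersion, app.status)
// app.appDefinition carries the cards and flow shown in the example output.
```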

", + "smithy.api#examples": [ + { + "title": "A basic application with 1 text input card and 1 output card", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "appId": "3d110749-efc3-427c-87e8-15e966e5c168" + }, + "output": { + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "appArn": "arn:aws:qapps:us-west-2:123456789012:app/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "title": "Color Palette Generator", + "appVersion": 1, + "status": "DRAFT", + "appDefinition": { + "appDefinitionVersion": "1", + "cards": [ + { + "textInput": { + "title": "Color Base", + "id": "4cf94d96-8819-45c2-98cc-58c56b35c72f", + "type": "text-input", + "dependencies": [] + } + }, + { + "qQuery": { + "title": "Recommended Palette", + "id": "18870b94-1e63-40e0-8c12-669c90ac5acc", + "type": "q-query", + "prompt": "Recommend me a list of colors that go well with @91e4513d-6981-454a-9329-329c9302eef4 ", + "outputSource": "llm", + "dependencies": [ + "91e4513d-6981-454a-9329-329c9302eef4" + ] + } + } + ] + }, + "createdAt": "2024-05-14T00:11:54.232Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-14T00:13:26.168Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac" + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/apps.get" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qapps#GetQAppInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App to retrieve.

", + "smithy.api#httpQuery": "appId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#GetQAppOutput": { + "type": "structure", + "members": { + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App.

", + "smithy.api#required": {} + } + }, + "appArn": { + "target": "com.amazonaws.qapps#AppArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Q App.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the Q App.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The description of the Q App.

" + } + }, + "initialPrompt": { + "target": "com.amazonaws.qapps#InitialPrompt", + "traits": { + "smithy.api#documentation": "

The initial prompt displayed when the Q App is started.

" + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Q App.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qapps#AppStatus", + "traits": { + "smithy.api#documentation": "

The status of the Q App.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who created the Q App.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was last updated.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the Q App.

", + "smithy.api#required": {} + } + }, + "requiredCapabilities": { + "target": "com.amazonaws.qapps#AppRequiredCapabilities", + "traits": { + "smithy.api#documentation": "

The capabilities required to run the Q App, such as file upload or third-party integrations.

" + } + }, + "appDefinition": { + "target": "com.amazonaws.qapps#AppDefinition", + "traits": { + "smithy.api#documentation": "

The full definition of the Q App, specifying the cards and flow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#GetQAppSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#GetQAppSessionInput" + }, + "output": { + "target": "com.amazonaws.qapps#GetQAppSessionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the current state and results for an active session of an Amazon Q App.
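A sketch of polling a running session until it reaches COMPLETED, assuming the generated GetQAppSessionInput shape, getQAppSession(_:) method, and an ExecutionStatus case named .completed; the IDs are placeholders:

```swift
import SotoQApps

let qapps = QApps(client: AWSClient(), region: .uswest2)
let input = QApps.GetQAppSessionInput(
    instanceId: "288ae830-1df2-4871-b6c0-4314d74dadef",
    sessionId: "1fca878e-64c5-4dc4-b1d9-c93effed4e82"
)

var session = try await qapps.getQAppSession(input)
while session.status != .completed {
    try await Task.sleep(nanoseconds: 2_000_000_000)   // simple fixed 2-second poll
    session = try await qapps.getQAppSession(input)
}
for (cardId, card) in session.cardStatus {
    print(cardId, card.currentState, card.currentValue)
}
```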

", + "smithy.api#examples": [ + { + "title": "Retrieves an existing session for an Amazon Q App", + "input": { + "instanceId": "288ae830-1df2-4871-b6c0-4314d74dadef", + "sessionId": "1fca878e-64c5-4dc4-b1d9-c93effed4e82" + }, + "output": { + "sessionId": "1fca878e-64c5-4dc4-b1d9-c93effed4e82", + "sessionArn": "arn:aws:qapps:us-west-2:0123456789012:application/a929ecd6-5765-4ec7-bd3e-2ca90098b18e/qapp/65e7dce7-226a-47f9-b689-22850becef89/session/1fca878e-64c5-4dc4-b1d9-c93effed4e82", + "status": "COMPLETED", + "cardStatus": { + "6fb5b404-3b7b-48a4-8a8b-56406922a606": { + "currentState": "COMPLETED", + "currentValue": "What is the circumference of Earth?" + }, + "1e6caeac-b481-45ff-a082-8b9a4a0b72e8": { + "currentState": "COMPLETED", + "currentValue": "Earth's circumference is 24,901 miles" + } + } + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/runtime.getQAppSession" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qapps#GetQAppSessionInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App session to retrieve.

", + "smithy.api#httpQuery": "sessionId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#GetQAppSessionOutput": { + "type": "structure", + "members": { + "sessionId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App session.

", + "smithy.api#required": {} + } + }, + "sessionArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Q App session.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qapps#ExecutionStatus", + "traits": { + "smithy.api#documentation": "

The current status of the Q App session.

", + "smithy.api#required": {} + } + }, + "cardStatus": { + "target": "com.amazonaws.qapps#CardStatusMap", + "traits": { + "smithy.api#documentation": "

The current status for each card in the Q App session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#ImportDocument": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#ImportDocumentInput" + }, + "output": { + "target": "com.amazonaws.qapps#ImportDocumentOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#ContentTooLargeException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Uploads a file that can then be used either as a default in a \n FileUploadCard from a Q App definition or as a file that \n is used inside a single Q App run. The purpose of the document is \n determined by a scope parameter that indicates whether it is at the \n app definition level or at the app session level.
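A sketch of uploading a small text file scoped to one session, assuming the generated ImportDocumentInput shape, a DocumentScope case named .session, and an importDocument(_:) method; IDs and file contents are placeholders:

```swift
import Foundation
import SotoQApps

let qapps = QApps(client: AWSClient(), region: .uswest2)

let base64 = Data("hello, world".utf8).base64EncodedString()
let upload = try await qapps.importDocument(.init(
    appId: "4263767c-d889-4cb2-a8f6-8b649bc66af0",
    cardId: "82f69028-22a9-4bea-8727-0eabf58e9fed",
    fileContentsBase64: "data:text/plain;base64,\(base64)",   // data URI form, as in the example
    fileName: "myFile.txt",
    instanceId: "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f",
    scope: .session,
    sessionId: "4f0e5b87-9d38-41cd-9eb4-ebce2f2917cc"
))
print(upload.fileId ?? "no fileId returned")
```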

", + "smithy.api#examples": [ + { + "title": "Upload a file to a specific session", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "fileContentsBase64": "data:text/plain;base64,SomeFileEncodedInBase64", + "fileName": "myFile.txt", + "cardId": "82f69028-22a9-4bea-8727-0eabf58e9fed", + "appId": "4263767c-d889-4cb2-a8f6-8b649bc66af0", + "scope": "SESSION", + "sessionId": "4f0e5b87-9d38-41cd-9eb4-ebce2f2917cc" + }, + "output": { + "fileId": "412aa1b4-341c-45af-936d-da52f8a1a3b4" + } + }, + { + "title": "Upload a file into a application", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "fileContentsBase64": "data:text/plain;base64,SomeFileEncodedInBase64", + "fileName": "anApplicationFile.txt", + "appId": "4263767c-d889-4cb2-a8f6-8b649bc66af0", + "cardId": "7a11f34b-42d4-4bc8-b668-ae4a788dae1e", + "scope": "APPLICATION" + }, + "output": { + "fileId": "bc1a0cc9-076a-4e82-9a6c-f4d2d8a22489" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.importDocument" + } + } + }, + "com.amazonaws.qapps#ImportDocumentInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "cardId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the card the file is associated with, if applicable.

", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App the file is associated with.

", + "smithy.api#required": {} + } + }, + "fileContentsBase64": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The base64-encoded contents of the file to upload.

", + "smithy.api#required": {} + } + }, + "fileName": { + "target": "com.amazonaws.qapps#Filename", + "traits": { + "smithy.api#documentation": "

The name of the file being uploaded.

", + "smithy.api#required": {} + } + }, + "scope": { + "target": "com.amazonaws.qapps#DocumentScope", + "traits": { + "smithy.api#documentation": "

Whether the file is associated with a Q App definition or a specific Q App session.

", + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App session the file is associated with, if applicable.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#ImportDocumentOutput": { + "type": "structure", + "members": { + "fileId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier assigned to the uploaded file.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#InitialPrompt": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 10000 + } + } + }, + "com.amazonaws.qapps#InstanceId": { + "type": "string" + }, + "com.amazonaws.qapps#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of seconds to wait before retrying the operation.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

An internal service error occurred while processing the request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.qapps#LibraryItemList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#LibraryItemMember" + } + }, + "com.amazonaws.qapps#LibraryItemMember": { + "type": "structure", + "members": { + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item.

", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "categories": { + "target": "com.amazonaws.qapps#CategoryList", + "traits": { + "smithy.api#documentation": "

The categories associated with the library item.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status of the library item.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who created the library item.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was last updated.

" + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the library item.

" + } + }, + "ratingCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of ratings the library item has received.

", + "smithy.api#required": {} + } + }, + "isRatedByUser": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Whether the current user has rated the library item.

" + } + }, + "userCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of users who have associated the Q App with their account.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A library item is a snapshot of an Amazon Q App \n that can be published so that other users can discover it, clone it, \n and run it from their Amazon Q Apps library.

" + } + }, + "com.amazonaws.qapps#LibraryItemStatus": { + "type": "enum", + "members": { + "PUBLISHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.qapps#ListLibraryItems": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#ListLibraryItemsInput" + }, + "output": { + "target": "com.amazonaws.qapps#ListLibraryItemsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the library items for Amazon Q Apps that are published and available for users in your Amazon Web Services account.
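A sketch of walking every page manually via the nextToken documented below, assuming the generated ListLibraryItemsInput shape and listLibraryItems(_:) method; Soto typically also emits a paginator helper for paginated operations:

```swift
import SotoQApps

let qapps = QApps(client: AWSClient(), region: .uswest2)

var nextToken: String? = nil
repeat {
    let page = try await qapps.listLibraryItems(.init(
        instanceId: "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f",
        limit: 25,
        nextToken: nextToken
    ))
    for item in page.libraryItems ?? [] {
        print(item.libraryItemId, item.status, item.ratingCount)
    }
    nextToken = page.nextToken
} while nextToken != nil
```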

", + "smithy.api#examples": [ + { + "title": "List at most 3 library items for this instance", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "limit": 3 + }, + "output": { + "libraryItems": [ + { + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316", + "appId": "7a11f34b-42d4-4bc8-b668-ae4a788dae1e", + "appVersion": 6, + "categories": [ + { + "id": "9c871ed4-1c41-4065-aefe-321cd4b61cf8", + "title": "HR" + }, + { + "id": "c1c4e374-118c-446f-81fb-cba6225d88da", + "title": "IT" + } + ], + "status": "PUBLISHED", + "ratingCount": 3, + "isRatedByUser": true, + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "createdAt": "2024-05-21T23:17:27.350Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-21T23:17:27.350Z", + "userCount": 5 + }, + { + "libraryItemId": "18cbebaa-196a-4aa5-a840-88d548e07f8f", + "appId": "201272ac-d474-4a97-991c-5520dae04026", + "appVersion": 1, + "categories": [ + { + "id": "fdc4b483-c4e2-44c9-b4b2-6c850bbdb579", + "title": "General" + } + ], + "status": "PUBLISHED", + "ratingCount": 5, + "isRatedByUser": false, + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "createdAt": "2024-05-08T16:09:56.080Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-08T16:09:56.080Z", + "userCount": 8 + }, + { + "libraryItemId": "549abfe0-f5c4-45a2-bb9b-c05987a49c6d", + "appId": "1802f57f-079a-4b5b-839a-79bbe2e21b3c", + "appVersion": 1, + "categories": [ + { + "id": "fdc4b483-c4e2-44c9-b4b2-6c850bbdb579", + "title": "General" + } + ], + "status": "PUBLISHED", + "ratingCount": 8, + "isRatedByUser": false, + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "createdAt": "2024-05-07T22:57:59.327Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-07T22:57:59.327Z", + "userCount": 12 + } + ], + "nextToken": "YW5vdGhlclRva2VuIQ==" + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/catalog.list" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "libraryItems", + "pageSize": "limit" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qapps#ListLibraryItemsInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "limit": { + "target": "com.amazonaws.qapps#PageLimit", + "traits": { + "smithy.api#documentation": "

The maximum number of library items to return in the response.

", + "smithy.api#httpQuery": "limit" + } + }, + "nextToken": { + "target": "com.amazonaws.qapps#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token to request the next page of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "categoryId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

Optional category to filter the library items by.

", + "smithy.api#httpQuery": "categoryId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#ListLibraryItemsOutput": { + "type": "structure", + "members": { + "libraryItems": { + "target": "com.amazonaws.qapps#LibraryItemList", + "traits": { + "smithy.api#documentation": "

The list of library items meeting the request criteria.

" + } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The token to use to request the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#ListQApps": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#ListQAppsInput" + }, + "output": { + "target": "com.amazonaws.qapps#ListQAppsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the Amazon Q Apps owned by or associated with the user, either because \n they created them or because they used them from the library in the past. The user \n identity is extracted from the credentials used to invoke this operation.

", + "smithy.api#examples": [ + { + "title": "List at most 3 Amazon Q Apps in an Q Business application", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "limit": 3 + }, + "output": { + "apps": [ + { + "appId": "7b9fe303-18bb-4643-952c-bfcf9f4c427f", + "appArn": "arn:aws:qapps:us-west-2:..../7b9fe303-18bb-4643-952c-bfcf9f4c427f", + "title": "App 1", + "description": "Description 1", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "DRAFT" + }, + { + "appId": "dd178fd6-ad3d-49b3-a32d-e915cf423e37", + "appArn": "arn:aws:qapps:us-west-2:..../dd178fd6-ad3d-49b3-a32d-e915cf423e37", + "title": "App 2", + "description": "Description 2", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "PUBLISHED" + }, + { + "appId": "3274b744-1a13-4aad-953f-eda2e4149e6e", + "appArn": "arn:aws:qapps:us-west-2:..../3274b744-1a13-4aad-953f-eda2e4149e6e", + "title": "App 3", + "description": "Description 3", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "DRAFT" + } + ], + "nextToken": "bXlzdGVyaW91c1BhZ2luYXRpb25Ub2tlbg==" + } + }, + { + "title": "Retrieve the next page of Amazon Q Apps", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "limit": 3, + "nextToken": "bXlzdGVyaW91c1BhZ2luYXRpb25Ub2tlbg==" + }, + "output": { + "apps": [ + { + "appId": "bec8ee64-2635-41e8-aace-e1e418f4f295", + "appArn": "arn:aws:qapps:us-west-2:..../bec8ee64-2635-41e8-aace-e1e418f4f295", + "title": "App 4", + "description": "Description 4", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "PUBLISHED" + }, + { + "appId": "c380a45d-bd77-45b0-a0e5-8a266c1d8bc4", + "appArn": "arn:aws:qapps:us-west-2:..../c380a45d-bd77-45b0-a0e5-8a266c1d8bc4", + "title": "App 5", + "description": "Description 5", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "PUBLISHED" + }, + { + "appId": "afc4ee80-9722-4396-85a6-7aeaff52c177", + "appArn": "arn:aws:qapps:us-west-2:..../afc4ee80-9722-4396-85a6-7aeaff52c177", + "title": "App 6", + "description": "Description 6", + "createdAt": "2024-05-21T04:09:10.401Z", + "status": "PUBLISHED" + } + ], + "nextToken": "YW5vdGhlclRva2VuIQ==" + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/apps.list" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "apps", + "pageSize": "limit" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "new" + ] + } + }, + "com.amazonaws.qapps#ListQAppsInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "limit": { + "target": "com.amazonaws.qapps#PageLimit", + "traits": { + "smithy.api#documentation": "

The maximum number of Q Apps to return in the response.

", + "smithy.api#httpQuery": "limit" + } + }, + "nextToken": { + "target": "com.amazonaws.qapps#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token to request the next page of results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#ListQAppsOutput": { + "type": "structure", + "members": { + "apps": { + "target": "com.amazonaws.qapps#UserAppsList", + "traits": { + "smithy.api#documentation": "

The list of Amazon Q Apps meeting the request criteria.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The token to use to request the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.qapps#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the tags associated with an Amazon Q Apps resource.

", + "smithy.api#examples": [ + { + "title": "A call to list tags for a resource", + "input": { + "resourceARN": "arn:aws:qapps:us-west-2:123456789012:application/3642ba81-344c-42fd-a480-9119a5a5f26b/qapp/7212ff04-de7b-4831-bd80-45d6975ba1b0" + }, + "output": { + "tags": { + "department": "HR" + } + } + } + ], + "smithy.api#http": { + "uri": "/tags/{resourceARN}", + "method": "GET" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qapps#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.qapps#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource whose tags should be listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.qapps#Tags", + "traits": { + "smithy.api#documentation": "

The list of tags that are assigned to the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#Long": { + "type": "long" + }, + "com.amazonaws.qapps#MessageList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#ConversationMessage" + } + }, + "com.amazonaws.qapps#PageLimit": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.qapps#PaginationToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 300 + } + } + }, + "com.amazonaws.qapps#Placeholder": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 500 + } + } + }, + "com.amazonaws.qapps#PlatoString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.qapps#PluginId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, + "com.amazonaws.qapps#PluginType": { + "type": "enum", + "members": { + "SERVICE_NOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVICE_NOW" + } + }, + "SALESFORCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SALESFORCE" + } + }, + "JIRA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JIRA" + } + }, + "ZENDESK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZENDESK" + } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } + } + } + }, + "com.amazonaws.qapps#PredictAppDefinition": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the generated Q App definition.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The description of the generated Q App definition.

" + } + }, + "appDefinition": { + "target": "com.amazonaws.qapps#AppDefinitionInput", + "traits": { + "smithy.api#documentation": "

The definition specifying the cards and flow of the generated Q App.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of an Amazon Q App generated based on input such as a conversation \n or problem statement.

" + } + }, + "com.amazonaws.qapps#PredictQApp": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#PredictQAppInput" + }, + "output": { + "target": "com.amazonaws.qapps#PredictQAppOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Generates an Amazon Q App definition based on either a conversation or \n a problem statement provided as input. The resulting app definition\n can be used to call CreateQApp. This API doesn't create \n Amazon Q Apps directly.
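A minimal sketch, not part of the upstream model: drafting an app definition from a plain-language problem statement. It assumes `qapps` is an already-configured `QApps` service object and that the `PredictQAppInputOptions` union is generated as a Swift enum whose `problemStatement` case matches the member name in this model.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): ask the service to draft an app
// definition from a problem statement. The returned definition is not created
// automatically; it can subsequently be passed to CreateQApp.
func draftQApp(qapps: QApps, instanceId: String) async throws -> QApps.PredictAppDefinition {
    let prediction = try await qapps.predictQApp(
        QApps.PredictQAppInput(
            instanceId: instanceId,
            options: .problemStatement("Summarize customer feedback and draft a reply email")
        )
    )
    // prediction.app carries the generated title, description, and card flow.
    return prediction.app
}
```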

", + "smithy.api#http": { + "method": "POST", + "uri": "/apps.predictQApp" + }, + "smithy.api#tags": [ + "new" + ] + } + }, + "com.amazonaws.qapps#PredictQAppInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "options": { + "target": "com.amazonaws.qapps#PredictQAppInputOptions", + "traits": { + "smithy.api#documentation": "

The input to generate the Q App definition from, either a conversation or problem statement.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#PredictQAppInputOptions": { + "type": "union", + "members": { + "conversation": { + "target": "com.amazonaws.qapps#MessageList", + "traits": { + "smithy.api#documentation": "

A conversation to use as input for generating the Q App definition.

", + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "problemStatement": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A problem statement to use as input for generating the Q App definition.

", + "smithy.api#length": { + "max": 10000 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The input options for generating a Q App definition.

" + } + }, + "com.amazonaws.qapps#PredictQAppOutput": { + "type": "structure", + "members": { + "app": { + "target": "com.amazonaws.qapps#PredictAppDefinition", + "traits": { + "smithy.api#documentation": "

The generated Q App definition.

", + "smithy.api#required": {} + } + }, + "problemStatement": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The problem statement extracted from the input conversation, if provided.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#Prompt": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 7000 + } + } + }, + "com.amazonaws.qapps#QAppsService": { + "type": "service", + "version": "2023-11-27", + "operations": [ + { + "target": "com.amazonaws.qapps#AssociateLibraryItemReview" + }, + { + "target": "com.amazonaws.qapps#AssociateQAppWithUser" + }, + { + "target": "com.amazonaws.qapps#CreateLibraryItem" + }, + { + "target": "com.amazonaws.qapps#CreateQApp" + }, + { + "target": "com.amazonaws.qapps#DeleteLibraryItem" + }, + { + "target": "com.amazonaws.qapps#DeleteQApp" + }, + { + "target": "com.amazonaws.qapps#DisassociateLibraryItemReview" + }, + { + "target": "com.amazonaws.qapps#DisassociateQAppFromUser" + }, + { + "target": "com.amazonaws.qapps#GetLibraryItem" + }, + { + "target": "com.amazonaws.qapps#GetQApp" + }, + { + "target": "com.amazonaws.qapps#GetQAppSession" + }, + { + "target": "com.amazonaws.qapps#ImportDocument" + }, + { + "target": "com.amazonaws.qapps#ListLibraryItems" + }, + { + "target": "com.amazonaws.qapps#ListQApps" + }, + { + "target": "com.amazonaws.qapps#ListTagsForResource" + }, + { + "target": "com.amazonaws.qapps#PredictQApp" + }, + { + "target": "com.amazonaws.qapps#StartQAppSession" + }, + { + "target": "com.amazonaws.qapps#StopQAppSession" + }, + { + "target": "com.amazonaws.qapps#TagResource" + }, + { + "target": "com.amazonaws.qapps#UntagResource" + }, + { + "target": "com.amazonaws.qapps#UpdateLibraryItem" + }, + { + "target": "com.amazonaws.qapps#UpdateQApp" + }, + { + "target": "com.amazonaws.qapps#UpdateQAppSession" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "QApps", + "arnNamespace": "qapps", + "endpointPrefix": "data.qapps" + }, + "aws.auth#sigv4": { + "name": "qapps" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

The Amazon Q Apps feature capability within Amazon Q Business allows web experience \n users to create lightweight, purpose-built AI apps to fulfill specific tasks from \n within their web experience. For example, users can create a Q App that exclusively \n generates marketing-related content to improve your marketing team's productivity or a \n Q App for marketing content-generation like writing customer emails and creating \n promotional content using a certain style of voice, tone, and branding. \n For more information, see Amazon Q App in the \n Amazon Q Business User Guide. \n

", + "smithy.api#title": "QApps", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://data.qapps-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://data.qapps-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://data.qapps.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://data.qapps.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 
with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://data.qapps-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://data.qapps.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.qapps#QAppsTimestamp": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + }, + "com.amazonaws.qapps#QPluginCard": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the plugin card.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the plugin card.

", + "smithy.api#required": {} + } + }, + "dependencies": { + "target": "com.amazonaws.qapps#DependencyList", + "traits": { + "smithy.api#documentation": "

Any dependencies or requirements for the plugin card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "prompt": { + "target": "com.amazonaws.qapps#Prompt", + "traits": { + "smithy.api#documentation": "

The prompt or instructions displayed for the plugin card.

", + "smithy.api#required": {} + } + }, + "pluginType": { + "target": "com.amazonaws.qapps#PluginType", + "traits": { + "smithy.api#documentation": "

The type or category of the plugin used by the card.

", + "smithy.api#required": {} + } + }, + "pluginId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the plugin used by the card.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A card in a Q App that integrates with a third-party plugin or service.

" + } + }, + "com.amazonaws.qapps#QPluginCardInput": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the plugin card.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the plugin card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#default": "q-plugin", + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "prompt": { + "target": "com.amazonaws.qapps#Prompt", + "traits": { + "smithy.api#documentation": "

The prompt or instructions displayed for the plugin card.

", + "smithy.api#required": {} + } + }, + "pluginId": { + "target": "com.amazonaws.qapps#PluginId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the plugin used by the card.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input shape for defining a plugin card in an Amazon Q App.

" + } + }, + "com.amazonaws.qapps#QQueryCard": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the query card.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the query card.

", + "smithy.api#required": {} + } + }, + "dependencies": { + "target": "com.amazonaws.qapps#DependencyList", + "traits": { + "smithy.api#documentation": "

Any dependencies or requirements for the query card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "prompt": { + "target": "com.amazonaws.qapps#Prompt", + "traits": { + "smithy.api#documentation": "

The prompt or instructions displayed for the query card.

", + "smithy.api#required": {} + } + }, + "outputSource": { + "target": "com.amazonaws.qapps#CardOutputSource", + "traits": { + "smithy.api#documentation": "

The source or type of output generated by the query card.

", + "smithy.api#required": {} + } + }, + "attributeFilter": { + "target": "com.amazonaws.qapps#AttributeFilter", + "traits": { + "smithy.api#documentation": "

The Amazon Q Business filters applied in this query card when resolving data sources.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A card in an Amazon Q App that generates a response \n based on the Amazon Q Business service.

" + } + }, + "com.amazonaws.qapps#QQueryCardInput": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the query card.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the query card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#default": "q-query", + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "prompt": { + "target": "com.amazonaws.qapps#Prompt", + "traits": { + "smithy.api#documentation": "

The prompt or instructions displayed for the query card.

", + "smithy.api#required": {} + } + }, + "outputSource": { + "target": "com.amazonaws.qapps#CardOutputSource", + "traits": { + "smithy.api#default": "approved-sources", + "smithy.api#documentation": "

The source or type of output to generate for the query card.

" + } + }, + "attributeFilter": { + "target": "com.amazonaws.qapps#AttributeFilter", + "traits": { + "smithy.api#documentation": "

Turns on filtering of responses based on document attributes or metadata fields.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The input shape for defining a query card in an Amazon Q App.

" + } + }, + "com.amazonaws.qapps#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the resource

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the resource

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested resource could not be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.qapps#Sender": { + "type": "enum", + "members": { + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USER" + } + }, + "SYSTEM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SYSTEM" + } + } + } + }, + "com.amazonaws.qapps#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the resource

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the resource

", + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The code for the service where the quota was exceeded

", + "smithy.api#required": {} + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The code of the quota that was exceeded

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested operation could not be completed because \n it would exceed the service's quota or limit.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.qapps#StartQAppSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#StartQAppSessionInput" + }, + "output": { + "target": "com.amazonaws.qapps#StartQAppSessionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Starts a new session for an Amazon Q App, allowing inputs to be provided \n and the app to be run.

\n \n

Each Q App session will be condensed into a single conversation \n in the web experience.

\n
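A minimal sketch, not part of the upstream model: starting a session against a specific app version and seeding one card with an initial value. The identifiers mirror the Smithy example that follows and are placeholders; the `CardValue` member names (`cardId`, `value`) are assumed from that example payload, and `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): start a Q App session and
// return its identifier so later calls can update or stop it.
func runQApp(qapps: QApps, instanceId: String) async throws -> String {
    let session = try await qapps.startQAppSession(
        QApps.StartQAppSessionInput(
            appId: "65e7dce7-226a-47f9-b689-22850becef89",          // placeholder app id
            appVersion: 1,
            initialValues: [
                QApps.CardValue(
                    cardId: "6fb5b404-3b7b-48a4-8a8b-56406922a606", // placeholder card id
                    value: "What is the circumference of Earth?"
                )
            ],
            instanceId: instanceId
        )
    )
    return session.sessionId
}
```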
", + "smithy.api#examples": [ + { + "title": "Start a session for an Amazon Q App using version 1, passing in initial values for one card", + "input": { + "instanceId": "4cc5e4c2-d2a2-4188-a114-9ca125b4aedc", + "appId": "65e7dce7-226a-47f9-b689-22850becef89", + "appVersion": 1, + "initialValues": [ + { + "cardId": "6fb5b404-3b7b-48a4-8a8b-56406922a606", + "value": "What is the circumference of Earth?" + } + ] + }, + "output": { + "sessionId": "1fca878e-64c5-4dc4-b1d9-c93effed4e82", + "sessionArn": "arn:aws:qapps:us-west-2:0123456789012:application/a929ecd6-5765-4ec7-bd3e-2ca90098b18e/qapp/65e7dce7-226a-47f9-b689-22850becef89/session/1fca878e-64c5-4dc4-b1d9-c93effed4e82" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/runtime.startQAppSession" + } + } + }, + "com.amazonaws.qapps#StartQAppSessionInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App to start a session for.

", + "smithy.api#required": {} + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Q App to use for the session.

", + "smithy.api#required": {} + } + }, + "initialValues": { + "target": "com.amazonaws.qapps#CardValueList", + "traits": { + "smithy.api#documentation": "

Optional initial input values to provide for the Q App session.

" + } + }, + "tags": { + "target": "com.amazonaws.qapps#TagMap", + "traits": { + "smithy.api#documentation": "

Optional tags to associate with the new Q App session.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#StartQAppSessionOutput": { + "type": "structure", + "members": { + "sessionId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the new Q App session.

", + "smithy.api#required": {} + } + }, + "sessionArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the new Q App session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#StopQAppSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#StopQAppSessionInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Stops an active session for an Amazon Q App. This deletes all data \n related to the session and makes it invalid for future uses. The \n results of the session will be persisted as part of the conversation.
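A minimal sketch, not part of the upstream model: ending a session once its results are no longer needed. The operation has no response payload (its output targets `smithy.api#Unit`); `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): stop a session. Session data is
// deleted, but its results remain part of the conversation per the documentation.
func stopSession(qapps: QApps, instanceId: String, sessionId: String) async throws {
    try await qapps.stopQAppSession(
        QApps.StopQAppSessionInput(instanceId: instanceId, sessionId: sessionId)
    )
}
```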

", + "smithy.api#http": { + "method": "POST", + "uri": "/runtime.deleteMiniAppRun" + } + } + }, + "com.amazonaws.qapps#StopQAppSessionInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App session to stop.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.qapps#TagKeys": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.qapps#TagMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.qapps#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.qapps#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#ConflictException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "smithy.api#documentation": "

Associates tags with an Amazon Q Apps resource.
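A minimal sketch, not part of the upstream model: tagging a Q App, reading the tags back, and removing the tag again, using the TagResource, ListTagsForResource, and UntagResource operations defined in this model. The ARN is a placeholder and `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): full tag lifecycle on one resource.
func manageTags(qapps: QApps) async throws {
    let arn = "arn:aws:qapps:us-west-2:123456789012:application/APP-ID/qapp/QAPP-ID" // placeholder

    _ = try await qapps.tagResource(
        QApps.TagResourceRequest(resourceARN: arn, tags: ["department": "HR"])
    )

    let listed = try await qapps.listTagsForResource(
        QApps.ListTagsForResourceRequest(resourceARN: arn)
    )
    print(listed.tags ?? [:])   // expected to contain ["department": "HR"]

    _ = try await qapps.untagResource(
        QApps.UntagResourceRequest(resourceARN: arn, tagKeys: ["department"])
    )
}
```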

", + "smithy.api#examples": [ + { + "title": "A call to tag a resource", + "input": { + "resourceARN": "arn:aws:qapps:us-west-2:123456789012:application/3642ba81-344c-42fd-a480-9119a5a5f26b/qapp/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "tags": { + "department": "HR" + } + } + } + ], + "smithy.api#http": { + "uri": "/tags/{resourceARN}", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qapps#TagResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.qapps#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to tag.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qapps#Tags", + "traits": { + "smithy.api#documentation": "

The tags to associate with the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.qapps#Tags": { + "type": "map", + "key": { + "target": "com.amazonaws.qapps#TagKey" + }, + "value": { + "target": "com.amazonaws.qapps#TagValue" + } + }, + "com.amazonaws.qapps#TextInputCard": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the text input card.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the text input card.

", + "smithy.api#required": {} + } + }, + "dependencies": { + "target": "com.amazonaws.qapps#DependencyList", + "traits": { + "smithy.api#documentation": "

Any dependencies or requirements for the text input card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "placeholder": { + "target": "com.amazonaws.qapps#Placeholder", + "traits": { + "smithy.api#documentation": "

The placeholder text to display in the text input field.

" + } + }, + "defaultValue": { + "target": "com.amazonaws.qapps#Default", + "traits": { + "smithy.api#documentation": "

The default value to pre-populate in the text input field.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A card in an Amazon Q App that allows the user to input text.

" + } + }, + "com.amazonaws.qapps#TextInputCardInput": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title or label of the text input card.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the text input card.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qapps#CardType", + "traits": { + "smithy.api#default": "text-input", + "smithy.api#documentation": "

The type of the card.

", + "smithy.api#required": {} + } + }, + "placeholder": { + "target": "com.amazonaws.qapps#Placeholder", + "traits": { + "smithy.api#documentation": "

The placeholder text to display in the text input field.

" + } + }, + "defaultValue": { + "target": "com.amazonaws.qapps#Default", + "traits": { + "smithy.api#documentation": "

The default value to pre-populate in the text input field.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The input shape for defining a text input card in an Amazon Q App.

" + } + }, + "com.amazonaws.qapps#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The code for the service where the quota was exceeded

", + "smithy.api#required": {} + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The code of the quota that was exceeded

", + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of seconds to wait before retrying the operation

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested operation could not be completed because too many \n requests were sent at once. Wait a bit and try again later.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.qapps#Timestamp": { + "type": "timestamp" + }, + "com.amazonaws.qapps#Title": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 100 + } + } + }, + "com.amazonaws.qapps#UUID": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}$" + } + }, + "com.amazonaws.qapps#UnauthorizedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The client is not authenticated or authorized to perform the requested operation.

", + "smithy.api#error": "client", + "smithy.api#httpError": 401 + } + }, + "com.amazonaws.qapps#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.qapps#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:TagKeys" + ], + "smithy.api#documentation": "

Disassociates tags from an Amazon Q Apps resource.

", + "smithy.api#examples": [ + { + "title": "A call to untag a resource", + "input": { + "resourceARN": "arn:aws:qapps:us-west-2:123456789012:application/3642ba81-344c-42fd-a480-9119a5a5f26b/qapp/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "tagKeys": [ + "department" + ] + } + } + ], + "smithy.api#http": { + "uri": "/tags/{resourceARN}", + "method": "DELETE" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qapps#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.qapps#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to disassociate the tag from.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.qapps#TagKeys", + "traits": { + "smithy.api#documentation": "

The keys of the tags to disassociate from the resource.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#UpdateLibraryItem": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#UpdateLibraryItemInput" + }, + "output": { + "target": "com.amazonaws.qapps#UpdateLibraryItemOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the metadata and status of a library item for an Amazon Q App.
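A minimal sketch, not part of the upstream model: hiding a shared library item by setting its status to DISABLED, mirroring the Smithy example that follows. The `.disabled` case name assumes Soto's usual enum-value mapping for `LibraryItemStatus`; `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): disable a library item so it is
// no longer visible in the library, then report the new status.
func disableLibraryItem(qapps: QApps, instanceId: String, libraryItemId: String) async throws {
    let updated = try await qapps.updateLibraryItem(
        QApps.UpdateLibraryItemInput(
            instanceId: instanceId,
            libraryItemId: libraryItemId,
            status: .disabled   // assumed case name for the "DISABLED" enum value
        )
    )
    print("Library item \(updated.libraryItemId) is now \(updated.status)")
}
```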

", + "smithy.api#examples": [ + { + "title": "Sets the status of a library item to DISABLED", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316", + "status": "DISABLED" + }, + "output": { + "libraryItemId": "cb9ecf72-8563-450d-9db9-994f98297316", + "appId": "7a11f34b-42d4-4bc8-b668-ae4a788dae1e", + "appVersion": 6, + "categories": [ + { + "id": "9c871ed4-1c41-4065-aefe-321cd4b61cf8", + "title": "HR" + }, + { + "id": "fdc4b483-c4e2-44c9-b4b2-6c850bbdb579", + "title": "General" + }, + { + "id": "c1c4e374-118c-446f-81fb-cba6225d88da", + "title": "IT" + } + ], + "status": "DISABLED", + "createdAt": "2024-05-21T23:17:27.350Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-28T19:43:48.577Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "ratingCount": 24 + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/catalog.updateItem" + } + } + }, + "com.amazonaws.qapps#UpdateLibraryItemInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the library item to update.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qapps#LibraryItemStatus", + "traits": { + "smithy.api#documentation": "

The new status to set for the library item, such as \"Published\" or \"Hidden\".

" + } + }, + "categories": { + "target": "com.amazonaws.qapps#CategoryIdList", + "traits": { + "smithy.api#documentation": "

The new categories to associate with the library item.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#UpdateLibraryItemOutput": { + "type": "structure", + "members": { + "libraryItemId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the updated library item.

", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The version of the Q App associated with the library item.

", + "smithy.api#required": {} + } + }, + "categories": { + "target": "com.amazonaws.qapps#CategoryList", + "traits": { + "smithy.api#documentation": "

The categories associated with the updated library item.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The new status of the updated library item.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was originally created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who originally created the library item.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the library item was last updated.

" + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the library item.

" + } + }, + "ratingCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of ratings the library item has received.

", + "smithy.api#required": {} + } + }, + "isRatedByUser": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Whether the current user has rated the library item.

" + } + }, + "userCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of users who have the associated Q App.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#UpdateQApp": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#UpdateQAppInput" + }, + "output": { + "target": "com.amazonaws.qapps#UpdateQAppOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#ContentTooLargeException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing Amazon Q App, allowing modifications to its title, description, and definition.
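A minimal sketch, not part of the upstream model: renaming a draft app, which bumps its version, as in the first Smithy example that follows. Only the title is changed here; the description and definition could be updated through the same input shape. `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): update an app's title and report
// the resulting version and status.
func renameQApp(qapps: QApps, instanceId: String, appId: String) async throws {
    let updated = try await qapps.updateQApp(
        QApps.UpdateQAppInput(
            appId: appId,
            instanceId: instanceId,
            title: "This is the new title"
        )
    )
    print("App \(updated.appId) now at version \(updated.appVersion), status \(updated.status)")
}
```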

", + "smithy.api#examples": [ + { + "title": "Updating the title of an app", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "title": "This is the new title" + }, + "output": { + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "appArn": "arn:aws:qapps:us-west-2:123456789012:app/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "title": "This is the new title", + "appVersion": 2, + "status": "DRAFT", + "createdAt": "2024-05-14T00:11:54.232Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-17T23:15:08.571Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "requiredCapabilities": [ + "CreatorMode" + ] + } + }, + { + "title": "Updating the app so it has a single q-query card", + "input": { + "instanceId": "0b95c9c4-89cc-4aa8-9aae-aa91cbec699f", + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "appDefinition": { + "cards": [ + { + "qQuery": { + "type": "q-query", + "title": "Trip Ideas", + "id": "18870b94-1e63-40e0-8c12-669c90ac5acc", + "prompt": "Recommend me an itinerary for a trip" + } + } + ] + } + }, + "output": { + "appId": "7212ff04-de7b-4831-bd80-45d6975ba1b0", + "appArn": "arn:aws:qapps:us-west-2:123456789012:app/7212ff04-de7b-4831-bd80-45d6975ba1b0", + "title": "Previous Title Stays the Same", + "appVersion": 99, + "status": "DRAFT", + "createdAt": "2024-05-14T00:11:54.232Z", + "createdBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "updatedAt": "2024-05-17T23:15:08.571Z", + "updatedBy": "a841e300-40c1-7062-fa34-5b46dadbbaac", + "requiredCapabilities": [ + "CreatorMode" + ] + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/apps.update" + } + } + }, + "com.amazonaws.qapps#UpdateQAppInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App to update.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The new title for the Q App.

" + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The new description for the Q App.

" + } + }, + "appDefinition": { + "target": "com.amazonaws.qapps#AppDefinitionInput", + "traits": { + "smithy.api#documentation": "

The new definition specifying the cards and flow for the Q App.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#UpdateQAppOutput": { + "type": "structure", + "members": { + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the updated Q App.

", + "smithy.api#required": {} + } + }, + "appArn": { + "target": "com.amazonaws.qapps#AppArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the updated Q App.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The new title of the updated Q App.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The new description of the updated Q App.

" + } + }, + "initialPrompt": { + "target": "com.amazonaws.qapps#InitialPrompt", + "traits": { + "smithy.api#documentation": "

The initial prompt for the updated Q App.

" + } + }, + "appVersion": { + "target": "com.amazonaws.qapps#AppVersion", + "traits": { + "smithy.api#documentation": "

The new version of the updated Q App.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qapps#AppStatus", + "traits": { + "smithy.api#documentation": "

The status of the updated Q App.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was originally created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who originally created the Q App.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the Q App was last updated.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user who last updated the Q App.

", + "smithy.api#required": {} + } + }, + "requiredCapabilities": { + "target": "com.amazonaws.qapps#AppRequiredCapabilities", + "traits": { + "smithy.api#documentation": "

The capabilities required for the updated Q App.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#UpdateQAppSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.qapps#UpdateQAppSessionInput" + }, + "output": { + "target": "com.amazonaws.qapps#UpdateQAppSessionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.qapps#AccessDeniedException" + }, + { + "target": "com.amazonaws.qapps#InternalServerException" + }, + { + "target": "com.amazonaws.qapps#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qapps#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qapps#ThrottlingException" + }, + { + "target": "com.amazonaws.qapps#UnauthorizedException" + }, + { + "target": "com.amazonaws.qapps#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the session for a given Q App sessionId. This is only \n valid when at least one card of the session is in the WAITING state. \n Data for each WAITING card can be provided as input. If inputs \n are not provided, the call will be accepted but the session will not move forward. \n Inputs for cards that are not in the WAITING status will be ignored.
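A minimal sketch, not part of the upstream model: supplying values for the cards that are currently WAITING so the session can progress. The card ID is a placeholder, the `CardValue` member names are assumed from the StartQAppSession example earlier in this model, and `qapps` is an already-configured `QApps` service object.

```swift
import SotoQApps

// Minimal sketch (not from the upstream model): provide input for a WAITING card.
// Values supplied for cards that are not WAITING are ignored by the service.
func provideSessionInput(qapps: QApps, instanceId: String, sessionId: String) async throws {
    _ = try await qapps.updateQAppSession(
        QApps.UpdateQAppSessionInput(
            instanceId: instanceId,
            sessionId: sessionId,
            values: [
                QApps.CardValue(
                    cardId: "6fb5b404-3b7b-48a4-8a8b-56406922a606", // placeholder card id
                    value: "Use a friendly, concise tone"
                )
            ]
        )
    )
}
```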

", + "smithy.api#http": { + "method": "POST", + "uri": "/runtime.updateQAppSession" + } + } + }, + "com.amazonaws.qapps#UpdateQAppSessionInput": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qapps#InstanceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon Q Business application environment instance.

", + "smithy.api#httpHeader": "instance-id", + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App session to provide input for.

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.qapps#CardValueList", + "traits": { + "smithy.api#documentation": "

The input values to provide for the current state of the Q App session.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qapps#UpdateQAppSessionOutput": { + "type": "structure", + "members": { + "sessionId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the updated Q App session.

", + "smithy.api#required": {} + } + }, + "sessionArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the updated Q App session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qapps#UserAppItem": { + "type": "structure", + "members": { + "appId": { + "target": "com.amazonaws.qapps#UUID", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Q App.

", + "smithy.api#required": {} + } + }, + "appArn": { + "target": "com.amazonaws.qapps#AppArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Q App.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qapps#Title", + "traits": { + "smithy.api#documentation": "

The title of the Q App.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qapps#Description", + "traits": { + "smithy.api#documentation": "

The description of the Q App.

" + } + }, + "createdAt": { + "target": "com.amazonaws.qapps#QAppsTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the user's association with the Q App was created.

", + "smithy.api#required": {} + } + }, + "canEdit": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating whether the user can edit the Q App.

" + } + }, + "status": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status of the user's association with the Q App.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Amazon Q App associated with a user, either owned by the user or favorited.

" + } + }, + "com.amazonaws.qapps#UserAppsList": { + "type": "list", + "member": { + "target": "com.amazonaws.qapps#UserAppItem" + } + }, + "com.amazonaws.qapps#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input failed to satisfy the constraints specified by the service.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/quicksight.json b/models/quicksight.json index 18face9a2b..f169298e86 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -232,6 +232,190 @@ } } }, + "com.amazonaws.quicksight#AggFunction": { + "type": "structure", + "members": { + "Aggregation": { + "target": "com.amazonaws.quicksight#AggType", + "traits": { + "smithy.api#documentation": "

The aggregation of an Agg function.

" + } + }, + "AggregationFunctionParameters": { + "target": "com.amazonaws.quicksight#AggFunctionParamMap", + "traits": { + "smithy.api#documentation": "

The aggregation parameters for an Agg function.

" + } + }, + "Period": { + "target": "com.amazonaws.quicksight#TopicTimeGranularity", + "traits": { + "smithy.api#documentation": "

The period of an Agg function.

" + } + }, + "PeriodField": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The period field for an Agg function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of an Agg function.

" + } + }, + "com.amazonaws.quicksight#AggFunctionParamKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.quicksight#AggFunctionParamMap": { + "type": "map", + "key": { + "target": "com.amazonaws.quicksight#AggFunctionParamKey" + }, + "value": { + "target": "com.amazonaws.quicksight#AggFunctionParamValue" + } + }, + "com.amazonaws.quicksight#AggFunctionParamValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.quicksight#AggType": { + "type": "enum", + "members": { + "SUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUM" + } + }, + "MIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MIN" + } + }, + "MAX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MAX" + } + }, + "COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COUNT" + } + }, + "AVERAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVERAGE" + } + }, + "DISTINCT_COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISTINCT_COUNT" + } + }, + "STDEV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STDEV" + } + }, + "STDEVP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STDEVP" + } + }, + "VAR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VAR" + } + }, + "VARP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VARP" + } + }, + "PERCENTILE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PERCENTILE" + } + }, + "MEDIAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIAN" + } + }, + "PTD_SUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_SUM" + } + }, + "PTD_MIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_MIN" + } + }, + "PTD_MAX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_MAX" + } + }, + "PTD_COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_COUNT" + } + }, + "PTD_DISTINCT_COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_DISTINCT_COUNT" + } + }, + "PTD_AVERAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PTD_AVERAGE" + } + }, + "COLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COLUMN" + } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } + } + } + }, "com.amazonaws.quicksight#AggregationFunction": { "type": "structure", "members": { @@ -273,6 +457,38 @@ "target": "com.amazonaws.quicksight#LimitedString" } }, + "com.amazonaws.quicksight#AggregationPartitionBy": { + "type": "structure", + "members": { + "FieldName": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The field name for an AggregationPartitionBy.

" + } + }, + "TimeGranularity": { + "target": "com.amazonaws.quicksight#TimeGranularity", + "traits": { + "smithy.api#documentation": "

The TimeGranularity for an AggregationPartitionBy.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of an AggregationPartitionBy.

" + } + }, + "com.amazonaws.quicksight#AggregationPartitionByList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AggregationPartitionBy" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, "com.amazonaws.quicksight#AggregationSortConfiguration": { "type": "structure", "members": { @@ -785,6 +1001,33 @@ } } }, + "com.amazonaws.quicksight#Anchor": { + "type": "structure", + "members": { + "AnchorType": { + "target": "com.amazonaws.quicksight#AnchorType", + "traits": { + "smithy.api#documentation": "

The AnchorType for the Anchor.

" + } + }, + "TimeGranularity": { + "target": "com.amazonaws.quicksight#TimeGranularity", + "traits": { + "smithy.api#documentation": "

The TimeGranularity of the Anchor.

" + } + }, + "Offset": { + "target": "com.amazonaws.quicksight#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The offset of the Anchor.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of the Anchor.

" + } + }, "com.amazonaws.quicksight#AnchorDateConfiguration": { "type": "structure", "members": { @@ -816,6 +1059,17 @@ } } }, + "com.amazonaws.quicksight#AnchorType": { + "type": "enum", + "members": { + "TODAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TODAY" + } + } + } + }, "com.amazonaws.quicksight#AnonymousUserDashboardEmbeddingConfiguration": { "type": "structure", "members": { @@ -928,6 +1182,22 @@ "target": "com.amazonaws.quicksight#AnonymousUserSnapshotJobResult" } }, + "com.amazonaws.quicksight#AnswerId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9-_.\\\\+]*$" + } + }, + "com.amazonaws.quicksight#AnswerIds": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AnswerId" + } + }, "com.amazonaws.quicksight#ArcAxisConfiguration": { "type": "structure", "members": { @@ -3893,6 +4163,230 @@ } } }, + "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswer": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswerRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates new reviewed answers for a Q Topic.

", + "smithy.api#http": { + "method": "POST", + "uri": "/accounts/{AwsAccountId}/topics/{TopicId}/batch-create-reviewed-answers", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswerRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that you want to create a reviewed answer in.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Answers": { + "target": "com.amazonaws.quicksight#CreateTopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of the Answers to be created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswerResponse": { + "type": "structure", + "members": { + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID for the topic reviewed answer that you want to create. This ID is unique per Amazon Web Services Region\n for each Amazon Web Services account.

" + } + }, + "TopicArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the topic.

" + } + }, + "SucceededAnswers": { + "target": "com.amazonaws.quicksight#SucceededTopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of Answers that are successfully created.

" + } + }, + "InvalidAnswers": { + "target": "com.amazonaws.quicksight#InvalidTopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of Answers that are invalid and not created.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswer": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswerRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#ConflictException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes reviewed answers for Q Topic.

", + "smithy.api#http": { + "method": "POST", + "uri": "/accounts/{AwsAccountId}/topics/{TopicId}/batch-delete-reviewed-answers", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswerRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that you want to delete reviewed answers in.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID for the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "AnswerIds": { + "target": "com.amazonaws.quicksight#AnswerIds", + "traits": { + "smithy.api#documentation": "

The Answer IDs of the Answers to be deleted.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswerResponse": { + "type": "structure", + "members": { + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID of the topic reviewed answer that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + } + }, + "TopicArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the topic.

" + } + }, + "SucceededAnswers": { + "target": "com.amazonaws.quicksight#SucceededTopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of Answers that are successfully deleted.

" + } + }, + "InvalidAnswers": { + "target": "com.amazonaws.quicksight#InvalidTopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of Answers that are invalid and not deleted.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#BigQueryParameters": { "type": "structure", "members": { @@ -4564,6 +5058,18 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.quicksight#CalculatedFieldReferenceList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#Identifier" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 250 + } + } + }, "com.amazonaws.quicksight#CalculatedFields": { "type": "list", "member": { @@ -5197,6 +5703,38 @@ "smithy.api#documentation": "

A structure that represents a collective constant.

" } }, + "com.amazonaws.quicksight#CollectiveConstantEntry": { + "type": "structure", + "members": { + "ConstantType": { + "target": "com.amazonaws.quicksight#ConstantType", + "traits": { + "smithy.api#documentation": "

The ConstantType of a CollectiveConstantEntry.

" + } + }, + "Value": { + "target": "com.amazonaws.quicksight#ConstantValueString", + "traits": { + "smithy.api#documentation": "

The value of a CollectiveConstantEntry.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a CollectiveConstantEntry.

" + } + }, + "com.amazonaws.quicksight#CollectiveConstantEntryList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#CollectiveConstantEntry" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2000 + } + } + }, "com.amazonaws.quicksight#ColorFillType": { "type": "enum", "members": { @@ -6160,6 +6698,71 @@ } } }, + "com.amazonaws.quicksight#ComparisonMethodType": { + "type": "enum", + "members": { + "DIFF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIFF" + } + }, + "PERC_DIFF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PERC_DIFF" + } + }, + "DIFF_AS_PERC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIFF_AS_PERC" + } + }, + "POP_CURRENT_DIFF_AS_PERC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POP_CURRENT_DIFF_AS_PERC" + } + }, + "POP_CURRENT_DIFF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POP_CURRENT_DIFF" + } + }, + "POP_OVERTIME_DIFF_AS_PERC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POP_OVERTIME_DIFF_AS_PERC" + } + }, + "POP_OVERTIME_DIFF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POP_OVERTIME_DIFF" + } + }, + "PERCENT_OF_TOTAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PERCENT_OF_TOTAL" + } + }, + "RUNNING_SUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING_SUM" + } + }, + "MOVING_AVERAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MOVING_AVERAGE" + } + } + } + }, "com.amazonaws.quicksight#Computation": { "type": "structure", "members": { @@ -6552,6 +7155,15 @@ } } }, + "com.amazonaws.quicksight#ConstantValueString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, "com.amazonaws.quicksight#ContextMenuOption": { "type": "structure", "members": { @@ -6600,6 +7212,104 @@ } } }, + "com.amazonaws.quicksight#ContributionAnalysisDirection": { + "type": "enum", + "members": { + "INCREASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCREASE" + } + }, + "DECREASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DECREASE" + } + }, + "NEUTRAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEUTRAL" + } + } + } + }, + "com.amazonaws.quicksight#ContributionAnalysisFactor": { + "type": "structure", + "members": { + "FieldName": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The field name of the ContributionAnalysisFactor.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for the ContributionAnalysisFactor.

" + } + }, + "com.amazonaws.quicksight#ContributionAnalysisFactorsList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#ContributionAnalysisFactor" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.quicksight#ContributionAnalysisSortType": { + "type": "enum", + "members": { + "ABSOLUTE_DIFFERENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ABSOLUTE_DIFFERENCE" + } + }, + "CONTRIBUTION_PERCENTAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTRIBUTION_PERCENTAGE" + } + }, + "DEVIATION_FROM_EXPECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEVIATION_FROM_EXPECTED" + } + }, + "PERCENTAGE_DIFFERENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PERCENTAGE_DIFFERENCE" + } + } + } + }, + "com.amazonaws.quicksight#ContributionAnalysisTimeRanges": { + "type": "structure", + "members": { + "StartRange": { + "target": "com.amazonaws.quicksight#TopicIRFilterOption", + "traits": { + "smithy.api#documentation": "

The start range for the ContributionAnalysisTimeRanges.

" + } + }, + "EndRange": { + "target": "com.amazonaws.quicksight#TopicIRFilterOption", + "traits": { + "smithy.api#documentation": "

The end range for the ContributionAnalysisTimeRanges.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for the ContributionAnalysisTimeRanges.

" + } + }, "com.amazonaws.quicksight#ContributorDimensionList": { "type": "list", "member": { @@ -9525,6 +10235,65 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#CreateTopicReviewedAnswer": { + "type": "structure", + "members": { + "AnswerId": { + "target": "com.amazonaws.quicksight#AnswerId", + "traits": { + "smithy.api#documentation": "

The answer ID for the CreateTopicReviewedAnswer.

", + "smithy.api#required": {} + } + }, + "DatasetArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Dataset ARN for the CreateTopicReviewedAnswer.

", + "smithy.api#required": {} + } + }, + "Question": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The Question to be created.

", + "smithy.api#required": {} + } + }, + "Mir": { + "target": "com.amazonaws.quicksight#TopicIR", + "traits": { + "smithy.api#documentation": "

The Mir for the CreateTopicReviewedAnswer.

" + } + }, + "PrimaryVisual": { + "target": "com.amazonaws.quicksight#TopicVisual", + "traits": { + "smithy.api#documentation": "

The PrimaryVisual for the CreateTopicReviewedAnswer.

" + } + }, + "Template": { + "target": "com.amazonaws.quicksight#TopicTemplate", + "traits": { + "smithy.api#documentation": "

The template for the CreateTopicReviewedAnswer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a CreateTopicReviewedAnswer.

" + } + }, + "com.amazonaws.quicksight#CreateTopicReviewedAnswers": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#CreateTopicReviewedAnswer" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.quicksight#CreateVPCConnection": { "type": "operation", "input": { @@ -13149,6 +13918,18 @@ "traits": { "smithy.api#documentation": "

The configuration of info icon label options.

" } + }, + "HelperTextVisibility": { + "target": "com.amazonaws.quicksight#Visibility", + "traits": { + "smithy.api#documentation": "

The helper text visibility of the DateTimePickerControlDisplayOptions.

" + } + }, + "DateIconVisibility": { + "target": "com.amazonaws.quicksight#Visibility", + "traits": { + "smithy.api#documentation": "

The date icon visibility of the DateTimePickerControlDisplayOptions.

" + } } }, "traits": { @@ -22406,6 +23187,44 @@ "smithy.api#documentation": "

With a Filter, you can remove portions of data from a particular visual or view.

\n

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" } }, + "com.amazonaws.quicksight#FilterAggMetrics": { + "type": "structure", + "members": { + "MetricOperand": { + "target": "com.amazonaws.quicksight#Identifier", + "traits": { + "smithy.api#documentation": "

The metric operand of the FilterAggMetrics.

" + } + }, + "Function": { + "target": "com.amazonaws.quicksight#AggType", + "traits": { + "smithy.api#documentation": "

The function for the FilterAggMetrics.

" + } + }, + "SortDirection": { + "target": "com.amazonaws.quicksight#TopicSortDirection", + "traits": { + "smithy.api#documentation": "

The sort direction for FilterAggMetrics.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for the FilterAggMetrics.

" + } + }, + "com.amazonaws.quicksight#FilterAggMetricsList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#FilterAggMetrics" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.quicksight#FilterClass": { "type": "enum", "members": { @@ -26553,6 +27372,21 @@ } } }, + "com.amazonaws.quicksight#Identifier": { + "type": "structure", + "members": { + "Identity": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The identity of the identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for the identifier.

" + } + }, "com.amazonaws.quicksight#IdentityCenterConfiguration": { "type": "structure", "members": { @@ -27599,6 +28433,32 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.quicksight#InvalidTopicReviewedAnswer": { + "type": "structure", + "members": { + "AnswerId": { + "target": "com.amazonaws.quicksight#AnswerId", + "traits": { + "smithy.api#documentation": "

The answer ID for the InvalidTopicReviewedAnswer.

" + } + }, + "Error": { + "target": "com.amazonaws.quicksight#ReviewedAnswerErrorCode", + "traits": { + "smithy.api#documentation": "

The error that is returned for the InvalidTopicReviewedAnswer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for an InvalidTopicReviewedAnswer.

" + } + }, + "com.amazonaws.quicksight#InvalidTopicReviewedAnswers": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#InvalidTopicReviewedAnswer" + } + }, "com.amazonaws.quicksight#IpRestrictionRuleDescription": { "type": "string", "traits": { @@ -31864,6 +32724,104 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#ListTopicReviewedAnswers": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#ListTopicReviewedAnswersRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#ListTopicReviewedAnswersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all reviewed answers for a Q Topic.

", + "smithy.api#http": { + "method": "GET", + "uri": "/accounts/{AwsAccountId}/topics/{TopicId}/reviewed-answers", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#ListTopicReviewedAnswersRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the reviewed answers that you want listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#ListTopicReviewedAnswersResponse": { + "type": "structure", + "members": { + "TopicId": { + "target": "com.amazonaws.quicksight#TopicId", + "traits": { + "smithy.api#documentation": "

The ID for the topic that contains the reviewed answer that you want to list. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + } + }, + "TopicArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the topic.

" + } + }, + "Answers": { + "target": "com.amazonaws.quicksight#TopicReviewedAnswers", + "traits": { + "smithy.api#documentation": "

The definition of all Answers in the topic.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#ListTopics": { "type": "operation", "input": { @@ -33118,6 +34076,20 @@ "target": "com.amazonaws.quicksight#NamedEntityDefinition" } }, + "com.amazonaws.quicksight#NamedEntityRef": { + "type": "structure", + "members": { + "NamedEntityName": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The NamedEntityName for the NamedEntityRef.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a NamedEntityRef.

" + } + }, "com.amazonaws.quicksight#NamedFilterAggType": { "type": "enum", "members": { @@ -33629,6 +34601,29 @@ } } }, + "com.amazonaws.quicksight#NullFilterOption": { + "type": "enum", + "members": { + "ALL_VALUES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_VALUES" + } + }, + "NON_NULLS_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NON_NULLS_ONLY" + } + }, + "NULLS_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NULLS_ONLY" + } + } + } + }, "com.amazonaws.quicksight#NullString": { "type": "string", "traits": { @@ -34160,6 +35155,18 @@ } } }, + "com.amazonaws.quicksight#OperandList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#Identifier" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 25 + } + } + }, "com.amazonaws.quicksight#OptionalPort": { "type": "integer", "traits": { @@ -36753,6 +37760,12 @@ "type": "service", "version": "2018-04-01", "operations": [ + { + "target": "com.amazonaws.quicksight#BatchCreateTopicReviewedAnswer" + }, + { + "target": "com.amazonaws.quicksight#BatchDeleteTopicReviewedAnswer" + }, { "target": "com.amazonaws.quicksight#CancelIngestion" }, @@ -37119,6 +38132,9 @@ { "target": "com.amazonaws.quicksight#ListTopicRefreshSchedules" }, + { + "target": "com.amazonaws.quicksight#ListTopicReviewedAnswers" + }, { "target": "com.amazonaws.quicksight#ListTopics" }, @@ -39957,6 +40973,53 @@ "smithy.api#pattern": "^[\\w\\-]+$" } }, + "com.amazonaws.quicksight#ReviewedAnswerErrorCode": { + "type": "enum", + "members": { + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + }, + "MISSING_ANSWER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISSING_ANSWER" + } + }, + "DATASET_DOES_NOT_EXIST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATASET_DOES_NOT_EXIST" + } + }, + "INVALID_DATASET_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_DATASET_ARN" + } + }, + "DUPLICATED_ANSWER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUPLICATED_ANSWER" + } + }, + "INVALID_DATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_DATA" + } + }, + "MISSING_REQUIRED_FIELDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISSING_REQUIRED_FIELDS" + } + } + } + }, "com.amazonaws.quicksight#Role": { "type": "enum", "members": { @@ -42912,6 +43975,32 @@ "smithy.api#documentation": "

The display options of a control.

" } }, + "com.amazonaws.quicksight#Slot": { + "type": "structure", + "members": { + "SlotId": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The slot ID of the slot.

" + } + }, + "VisualId": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The visual ID for the slot.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for the slot.

" + } + }, + "com.amazonaws.quicksight#Slots": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#Slot" + } + }, "com.amazonaws.quicksight#SmallMultiplesAxisPlacement": { "type": "enum", "members": { @@ -44463,6 +45552,26 @@ "smithy.api#documentation": "

The subtotal options.

" } }, + "com.amazonaws.quicksight#SucceededTopicReviewedAnswer": { + "type": "structure", + "members": { + "AnswerId": { + "target": "com.amazonaws.quicksight#AnswerId", + "traits": { + "smithy.api#documentation": "

The answer ID for the SucceededTopicReviewedAnswer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a SucceededTopicReviewedAnswer.

" + } + }, + "com.amazonaws.quicksight#SucceededTopicReviewedAnswers": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#SucceededTopicReviewedAnswer" + } + }, "com.amazonaws.quicksight#SuccessfulKeyRegistrationEntries": { "type": "list", "member": { @@ -47472,6 +48581,44 @@ "target": "com.amazonaws.quicksight#TopicColumn" } }, + "com.amazonaws.quicksight#TopicConstantValue": { + "type": "structure", + "members": { + "ConstantType": { + "target": "com.amazonaws.quicksight#ConstantType", + "traits": { + "smithy.api#documentation": "

The constant type of a TopicConstantValue.

" + } + }, + "Value": { + "target": "com.amazonaws.quicksight#ConstantValueString", + "traits": { + "smithy.api#documentation": "

The value of the TopicConstantValue.

" + } + }, + "Minimum": { + "target": "com.amazonaws.quicksight#ConstantValueString", + "traits": { + "smithy.api#documentation": "

The minimum for the TopicConstantValue.

" + } + }, + "Maximum": { + "target": "com.amazonaws.quicksight#ConstantValueString", + "traits": { + "smithy.api#documentation": "

The maximum for the TopicConstantValue.

" + } + }, + "ValueList": { + "target": "com.amazonaws.quicksight#CollectiveConstantEntryList", + "traits": { + "smithy.api#documentation": "

The value list of the TopicConstantValue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicConstantValue.

" + } + }, "com.amazonaws.quicksight#TopicDateRangeFilter": { "type": "structure", "members": { @@ -47607,6 +48754,499 @@ "target": "com.amazonaws.quicksight#TopicFilter" } }, + "com.amazonaws.quicksight#TopicIR": { + "type": "structure", + "members": { + "Metrics": { + "target": "com.amazonaws.quicksight#TopicIRMetricList", + "traits": { + "smithy.api#documentation": "

The metrics for the TopicIR.

" + } + }, + "GroupByList": { + "target": "com.amazonaws.quicksight#TopicIRGroupByList", + "traits": { + "smithy.api#documentation": "

The GroupBy list for the TopicIR.

" + } + }, + "Filters": { + "target": "com.amazonaws.quicksight#TopicIRFilterList", + "traits": { + "smithy.api#documentation": "

The filters for the TopicIR.

" + } + }, + "Sort": { + "target": "com.amazonaws.quicksight#TopicSortClause", + "traits": { + "smithy.api#documentation": "

The sort for the TopicIR.

" + } + }, + "ContributionAnalysis": { + "target": "com.amazonaws.quicksight#TopicIRContributionAnalysis", + "traits": { + "smithy.api#documentation": "

The contribution analysis for the TopicIR.

" + } + }, + "Visual": { + "target": "com.amazonaws.quicksight#VisualOptions", + "traits": { + "smithy.api#documentation": "

The visual for the TopicIR.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicIR.

" + } + }, + "com.amazonaws.quicksight#TopicIRComparisonMethod": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.quicksight#ComparisonMethodType", + "traits": { + "smithy.api#documentation": "

The type for the TopicIRComparisonMethod.

" + } + }, + "Period": { + "target": "com.amazonaws.quicksight#TopicTimeGranularity", + "traits": { + "smithy.api#documentation": "

The period for the TopicIRComparisonMethod.

" + } + }, + "WindowSize": { + "target": "com.amazonaws.quicksight#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The window size for the TopicIRComparisonMethod.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition of a TopicIRComparisonMethod.

" + } + }, + "com.amazonaws.quicksight#TopicIRContributionAnalysis": { + "type": "structure", + "members": { + "Factors": { + "target": "com.amazonaws.quicksight#ContributionAnalysisFactorsList", + "traits": { + "smithy.api#documentation": "

The factors for a TopicIRContributionAnalysis.

" + } + }, + "TimeRanges": { + "target": "com.amazonaws.quicksight#ContributionAnalysisTimeRanges", + "traits": { + "smithy.api#documentation": "

The time ranges for the TopicIRContributionAnalysis.

" + } + }, + "Direction": { + "target": "com.amazonaws.quicksight#ContributionAnalysisDirection", + "traits": { + "smithy.api#documentation": "

The direction for the TopicIRContributionAnalysis.

" + } + }, + "SortType": { + "target": "com.amazonaws.quicksight#ContributionAnalysisSortType", + "traits": { + "smithy.api#documentation": "

The sort type for the TopicIRContributionAnalysis.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicIRContributionAnalysis.

" + } + }, + "com.amazonaws.quicksight#TopicIRFilterEntry": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicIRFilterOption" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2000 + } + } + }, + "com.amazonaws.quicksight#TopicIRFilterFunction": { + "type": "enum", + "members": { + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + }, + "EXACT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXACT" + } + }, + "STARTS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTS_WITH" + } + }, + "ENDS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENDS_WITH" + } + }, + "CONTAINS_STRING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS_STRING" + } + }, + "PREVIOUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PREVIOUS" + } + }, + "THIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THIS" + } + }, + "LAST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAST" + } + }, + "NEXT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEXT" + } + }, + "NOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOW" + } + } + } + }, + "com.amazonaws.quicksight#TopicIRFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicIRFilterEntry" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2000 + } + } + }, + "com.amazonaws.quicksight#TopicIRFilterOption": { + "type": "structure", + "members": { + "FilterType": { + "target": "com.amazonaws.quicksight#TopicIRFilterType", + "traits": { + "smithy.api#documentation": "

The filter type for the TopicIRFilterOption.

" + } + }, + "FilterClass": { + "target": "com.amazonaws.quicksight#FilterClass", + "traits": { + "smithy.api#documentation": "

The filter class for the TopicIRFilterOption.

" + } + }, + "OperandField": { + "target": "com.amazonaws.quicksight#Identifier", + "traits": { + "smithy.api#documentation": "

The operand field for the TopicIRFilterOption.

" + } + }, + "Function": { + "target": "com.amazonaws.quicksight#TopicIRFilterFunction", + "traits": { + "smithy.api#documentation": "

The function for the TopicIRFilterOption.

" + } + }, + "Constant": { + "target": "com.amazonaws.quicksight#TopicConstantValue", + "traits": { + "smithy.api#documentation": "

The constant for the TopicIRFilterOption.

" + } + }, + "Inverse": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

The inverse for the TopicIRFilterOption.

" + } + }, + "NullFilter": { + "target": "com.amazonaws.quicksight#NullFilterOption", + "traits": { + "smithy.api#documentation": "

The null filter for the TopicIRFilterOption.

" + } + }, + "Aggregation": { + "target": "com.amazonaws.quicksight#AggType", + "traits": { + "smithy.api#documentation": "

The aggregation for the TopicIRFilterOption.

" + } + }, + "AggregationFunctionParameters": { + "target": "com.amazonaws.quicksight#AggFunctionParamMap", + "traits": { + "smithy.api#documentation": "

The aggregation function parameters for the TopicIRFilterOption.

" + } + }, + "AggregationPartitionBy": { + "target": "com.amazonaws.quicksight#AggregationPartitionByList", + "traits": { + "smithy.api#documentation": "

The AggregationPartitionBy for the TopicIRFilterOption.

" + } + }, + "Range": { + "target": "com.amazonaws.quicksight#TopicConstantValue", + "traits": { + "smithy.api#documentation": "

The range for the TopicIRFilterOption.

" + } + }, + "Inclusive": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

The inclusive for the TopicIRFilterOption.

" + } + }, + "TimeGranularity": { + "target": "com.amazonaws.quicksight#TimeGranularity", + "traits": { + "smithy.api#documentation": "

The time granularity for the TopicIRFilterOption.

" + } + }, + "LastNextOffset": { + "target": "com.amazonaws.quicksight#TopicConstantValue", + "traits": { + "smithy.api#documentation": "

The last next offset for the TopicIRFilterOption.

" + } + }, + "AggMetrics": { + "target": "com.amazonaws.quicksight#FilterAggMetricsList", + "traits": { + "smithy.api#documentation": "

The agg metrics for the TopicIRFilterOption.

" + } + }, + "TopBottomLimit": { + "target": "com.amazonaws.quicksight#TopicConstantValue", + "traits": { + "smithy.api#documentation": "

The TopBottomLimit for the TopicIRFilterOption.

" + } + }, + "SortDirection": { + "target": "com.amazonaws.quicksight#TopicSortDirection", + "traits": { + "smithy.api#documentation": "

The sort direction for the TopicIRFilterOption.

" + } + }, + "Anchor": { + "target": "com.amazonaws.quicksight#Anchor", + "traits": { + "smithy.api#documentation": "

The anchor for the TopicIRFilterOption.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicIRFilterOption.

" + } + }, + "com.amazonaws.quicksight#TopicIRFilterType": { + "type": "enum", + "members": { + "CATEGORY_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CATEGORY_FILTER" + } + }, + "NUMERIC_EQUALITY_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NUMERIC_EQUALITY_FILTER" + } + }, + "NUMERIC_RANGE_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NUMERIC_RANGE_FILTER" + } + }, + "DATE_RANGE_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATE_RANGE_FILTER" + } + }, + "RELATIVE_DATE_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RELATIVE_DATE_FILTER" + } + }, + "TOP_BOTTOM_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TOP_BOTTOM_FILTER" + } + }, + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "RANK_LIMIT_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RANK_LIMIT_FILTER" + } + }, + "ACCEPT_ALL_FILTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCEPT_ALL_FILTER" + } + } + } + }, + "com.amazonaws.quicksight#TopicIRGroupBy": { + "type": "structure", + "members": { + "FieldName": { + "target": "com.amazonaws.quicksight#Identifier", + "traits": { + "smithy.api#documentation": "

The field name for the TopicIRGroupBy.

" + } + }, + "TimeGranularity": { + "target": "com.amazonaws.quicksight#TopicTimeGranularity", + "traits": { + "smithy.api#documentation": "

The time granularity for the TopicIRGroupBy.

" + } + }, + "Sort": { + "target": "com.amazonaws.quicksight#TopicSortClause", + "traits": { + "smithy.api#documentation": "

The sort for the TopicIRGroupBy.

" + } + }, + "DisplayFormat": { + "target": "com.amazonaws.quicksight#DisplayFormat", + "traits": { + "smithy.api#documentation": "

The display format for the TopicIRGroupBy.

" + } + }, + "DisplayFormatOptions": { + "target": "com.amazonaws.quicksight#DisplayFormatOptions" + }, + "NamedEntity": { + "target": "com.amazonaws.quicksight#NamedEntityRef", + "traits": { + "smithy.api#documentation": "

The named entity for the TopicIRGroupBy.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicIRGroupBy.

" + } + }, + "com.amazonaws.quicksight#TopicIRGroupByList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicIRGroupBy" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2000 + } + } + }, + "com.amazonaws.quicksight#TopicIRMetric": { + "type": "structure", + "members": { + "MetricId": { + "target": "com.amazonaws.quicksight#Identifier", + "traits": { + "smithy.api#documentation": "

The metric ID for the TopicIRMetric.

" + } + }, + "Function": { + "target": "com.amazonaws.quicksight#AggFunction", + "traits": { + "smithy.api#documentation": "

The function for the TopicIRMetric.

" + } + }, + "Operands": { + "target": "com.amazonaws.quicksight#OperandList", + "traits": { + "smithy.api#documentation": "

The operands for the TopicIRMetric.

" + } + }, + "ComparisonMethod": { + "target": "com.amazonaws.quicksight#TopicIRComparisonMethod", + "traits": { + "smithy.api#documentation": "

The comparison method for the TopicIRMetric.

" + } + }, + "Expression": { + "target": "com.amazonaws.quicksight#Expression", + "traits": { + "smithy.api#documentation": "

The expression for the TopicIRMetric.

" + } + }, + "CalculatedFieldReferences": { + "target": "com.amazonaws.quicksight#CalculatedFieldReferenceList", + "traits": { + "smithy.api#documentation": "

The calculated field references for the TopicIRMetric.

" + } + }, + "DisplayFormat": { + "target": "com.amazonaws.quicksight#DisplayFormat", + "traits": { + "smithy.api#documentation": "

The display format for the TopicIRMetric.

" + } + }, + "DisplayFormatOptions": { + "target": "com.amazonaws.quicksight#DisplayFormatOptions" + }, + "NamedEntity": { + "target": "com.amazonaws.quicksight#NamedEntityRef", + "traits": { + "smithy.api#documentation": "

The named entity for the TopicIRMetric.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicIRMetric.

" + } + }, + "com.amazonaws.quicksight#TopicIRMetricList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicIRMetric" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2000 + } + } + }, "com.amazonaws.quicksight#TopicId": { "type": "string", "traits": { @@ -47954,6 +49594,65 @@ } } }, + "com.amazonaws.quicksight#TopicReviewedAnswer": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the reviewed answer.

" + } + }, + "AnswerId": { + "target": "com.amazonaws.quicksight#AnswerId", + "traits": { + "smithy.api#documentation": "

The answer ID of the reviewed answer.

", + "smithy.api#required": {} + } + }, + "DatasetArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Dataset ARN for the TopicReviewedAnswer.

", + "smithy.api#required": {} + } + }, + "Question": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The question for the TopicReviewedAnswer.

", + "smithy.api#required": {} + } + }, + "Mir": { + "target": "com.amazonaws.quicksight#TopicIR", + "traits": { + "smithy.api#documentation": "

The mir for the TopicReviewedAnswer.

" + } + }, + "PrimaryVisual": { + "target": "com.amazonaws.quicksight#TopicVisual", + "traits": { + "smithy.api#documentation": "

The primary visual for the TopicReviewedAnswer.

" + } + }, + "Template": { + "target": "com.amazonaws.quicksight#TopicTemplate", + "traits": { + "smithy.api#documentation": "

The template for the TopicReviewedAnswer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicReviewedAnswer.

" + } + }, + "com.amazonaws.quicksight#TopicReviewedAnswers": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicReviewedAnswer" + } + }, "com.amazonaws.quicksight#TopicScheduleType": { "type": "enum", "members": { @@ -48004,6 +49703,43 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.quicksight#TopicSortClause": { + "type": "structure", + "members": { + "Operand": { + "target": "com.amazonaws.quicksight#Identifier", + "traits": { + "smithy.api#documentation": "

The operand for a TopicSortClause.

" + } + }, + "SortDirection": { + "target": "com.amazonaws.quicksight#TopicSortDirection", + "traits": { + "smithy.api#documentation": "

The sort direction for the TopicSortClause.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicSortClause.

" + } + }, + "com.amazonaws.quicksight#TopicSortDirection": { + "type": "enum", + "members": { + "ASCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASCENDING" + } + }, + "DESCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DESCENDING" + } + } + } + }, "com.amazonaws.quicksight#TopicSummaries": { "type": "list", "member": { @@ -48042,6 +49778,26 @@ "smithy.api#documentation": "

A topic summary.

" } }, + "com.amazonaws.quicksight#TopicTemplate": { + "type": "structure", + "members": { + "TemplateType": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The template type for the TopicTemplate.

" + } + }, + "Slots": { + "target": "com.amazonaws.quicksight#Slots", + "traits": { + "smithy.api#documentation": "

The slots for the TopicTemplate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicTemplate.

" + } + }, "com.amazonaws.quicksight#TopicTimeGranularity": { "type": "enum", "members": { @@ -48112,6 +49868,44 @@ } } }, + "com.amazonaws.quicksight#TopicVisual": { + "type": "structure", + "members": { + "VisualId": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The visual ID for the TopicVisual.

" + } + }, + "Role": { + "target": "com.amazonaws.quicksight#VisualRole", + "traits": { + "smithy.api#documentation": "

The role for the TopicVisual.

" + } + }, + "Ir": { + "target": "com.amazonaws.quicksight#TopicIR", + "traits": { + "smithy.api#documentation": "

The ir for the TopicVisual.

" + } + }, + "SupportingVisuals": { + "target": "com.amazonaws.quicksight#TopicVisuals", + "traits": { + "smithy.api#documentation": "

The supporting visuals for the TopicVisual.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a TopicVisual.

" + } + }, + "com.amazonaws.quicksight#TopicVisuals": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TopicVisual" + } + }, "com.amazonaws.quicksight#TotalAggregationComputation": { "type": "structure", "members": { @@ -53875,6 +55669,20 @@ "smithy.api#documentation": "

The menu options for a visual.

" } }, + "com.amazonaws.quicksight#VisualOptions": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.quicksight#LimitedString", + "traits": { + "smithy.api#documentation": "

The type for a VisualOptions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The definition for a VisualOptions.

" + } + }, "com.amazonaws.quicksight#VisualPalette": { "type": "structure", "members": { @@ -53895,6 +55703,41 @@ "smithy.api#documentation": "

The visual display options for the visual palette.

" } }, + "com.amazonaws.quicksight#VisualRole": { + "type": "enum", + "members": { + "PRIMARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRIMARY" + } + }, + "COMPLIMENTARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLIMENTARY" + } + }, + "MULTI_INTENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MULTI_INTENT" + } + }, + "FALLBACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FALLBACK" + } + }, + "FRAGMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FRAGMENT" + } + } + } + }, "com.amazonaws.quicksight#VisualSubtitleLabelOptions": { "type": "structure", "members": { diff --git a/models/rds.json b/models/rds.json index 58ab8cf2a1..902fc849c7 100644 --- a/models/rds.json +++ b/models/rds.json @@ -4409,7 +4409,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint\n resolves to the private IP address from within the DB cluster's virtual private cloud\n (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access\n to the DB cluster is ultimately controlled by the security group it uses. That public\n access isn't permitted if the security group assigned to the DB cluster doesn't permit\n it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), \n its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, \n the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public\n access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
" } }, "AutoMinorVersionUpgrade": { @@ -5053,7 +5053,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.
  • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.
  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.
" + "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.
  • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.
  • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.
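
Because the effective default above depends on both DBSubnetGroupName and the VPC's internet gateway, a caller may prefer to state the flag explicitly. A minimal sketch of doing so through the generated Soto RDS client, assuming the generated member names; identifiers are placeholders and required engine/credential settings are elided:

```swift
import SotoRDS

// Sketch: create a DB instance with PubliclyAccessible stated explicitly instead of
// relying on the subnet-group / internet-gateway dependent default described above.
// Identifiers are placeholders; other required engine settings are elided.
func createPrivateInstance(using rds: RDS) async throws {
    let request = RDS.CreateDBInstanceMessage(
        dbInstanceClass: "db.t3.micro",
        dbInstanceIdentifier: "example-instance",
        dbSubnetGroupName: "example-subnet-group",
        engine: "postgres",
        publiclyAccessible: false  // do not depend on the default behaviour
    )
    let result = try await rds.createDBInstance(request)
    print(result.dbInstance?.dbInstanceStatus ?? "pending")
}
```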
" } }, "Tags": { @@ -7436,7 +7436,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Indicates whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

For more information, see CreateDBCluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

Indicates whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

For more information, see CreateDBCluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

" } }, "AutoMinorVersionUpgrade": { @@ -9150,7 +9150,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether the DB instance is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" + "smithy.api#documentation": "

Indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" } }, "StatusInfos": { @@ -12149,7 +12149,7 @@ "DeleteAutomatedBackups": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.

" + "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted.

You must delete automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing automated backups for RDS Multi-AZ DB clusters, see Managing automated backups.
" } } }, @@ -13909,7 +13909,7 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A specific source to return parameters for.

Valid Values:

  • customer
  • engine
  • service
" + "smithy.api#documentation": "

A specific source to return parameters for.

Valid Values:

  • user
  • engine
  • service
" } }, "Filters": { @@ -14615,7 +14615,20 @@ "outputToken": "Marker", "items": "DBEngineVersions", "pageSize": "MaxRecords" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeDBEngineVersionsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.rds#DescribeDBEngineVersionsMessage": { @@ -14840,6 +14853,21 @@ "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], + "smithy.test#smokeTests": [ + { + "id": "DescribeDBInstancesFailure", + "params": { + "DBInstanceIdentifier": "fake-id" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "DBInstanceAvailable": { "acceptors": [ @@ -17485,7 +17513,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

", + "smithy.api#documentation": "

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

This API follows an eventual consistency model. This means that the result of the DescribePendingMaintenanceActions command might not be immediately visible to all subsequent RDS commands. Keep this in mind when you use DescribePendingMaintenanceActions immediately after using a previous API command such as ApplyPendingMaintenanceActions.

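Since the added wording notes that DescribePendingMaintenanceActions is eventually consistent, callers that have just applied an action may want to re-read a few times before trusting an empty result. A hedged Soto sketch; the retry count and delays are illustrative only:

```swift
import SotoRDS

// Re-read DescribePendingMaintenanceActions a few times, because its result can lag a
// preceding ApplyPendingMaintenanceAction call (eventual consistency). Illustrative only.
func pendingActions(for resourceIdentifier: String, using rds: RDS) async throws -> [RDS.ResourcePendingMaintenanceActions] {
    for attempt in 1...5 {
        let output = try await rds.describePendingMaintenanceActions(
            .init(resourceIdentifier: resourceIdentifier)
        )
        if let actions = output.pendingMaintenanceActions, !actions.isEmpty {
            return actions
        }
        // back off briefly before the next read
        try await Task.sleep(nanoseconds: UInt64(attempt) * 2_000_000_000)
    }
    return []
}
```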
", "smithy.api#examples": [ { "title": "To list resources with at least one pending maintenance action", @@ -22284,7 +22312,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" + "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

When the DB instance is publicly accessible and you connect from outside of the DB instance's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB instance, the endpoint resolves to the private IP address. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" } }, "MonitoringRoleArn": { diff --git a/models/route53resolver.json b/models/route53resolver.json index 95aa1d4cc8..0ca0cefc6d 100644 --- a/models/route53resolver.json +++ b/models/route53resolver.json @@ -4788,7 +4788,20 @@ "outputToken": "NextToken", "items": "ResolverEndpoints", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListResolverEndpointsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.route53resolver#ListResolverEndpointsRequest": { diff --git a/models/sagemaker.json b/models/sagemaker.json index a02c56b9e2..09b64b049f 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -385,6 +385,51 @@ } } }, + "com.amazonaws.sagemaker#AdditionalModelChannelName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[A-Za-z0-9\\.\\-_]+$" + } + }, + "com.amazonaws.sagemaker#AdditionalModelDataSource": { + "type": "structure", + "members": { + "ChannelName": { + "target": "com.amazonaws.sagemaker#AdditionalModelChannelName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A custom name for this AdditionalModelDataSource object.

", + "smithy.api#required": {} + } + }, + "S3DataSource": { + "target": "com.amazonaws.sagemaker#S3ModelDataSource", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + } + }, + "com.amazonaws.sagemaker#AdditionalModelDataSources": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#AdditionalModelDataSource" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#AdditionalS3DataSource": { "type": "structure", "members": { @@ -851,6 +896,26 @@ "smithy.api#documentation": "

Specifies configurations for one or more training jobs that SageMaker runs to test the\n algorithm.

" } }, + "com.amazonaws.sagemaker#AmazonQSettings": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sagemaker#FeatureStatus", + "traits": { + "smithy.api#documentation": "

Whether Amazon Q has been enabled within the domain.

" + } + }, + "QProfileArn": { + "target": "com.amazonaws.sagemaker#QProfileArn", + "traits": { + "smithy.api#documentation": "

The ARN of the Amazon Q profile used within the domain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A collection of settings that configure the Amazon Q experience within the domain.

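Once this model is regenerated into SotoSageMaker, the new shape should surface roughly as below. A hedged sketch of enabling Amazon Q on a domain's settings; type, member, and case names are assumed from the Smithy definitions above, and the profile ARN is a placeholder:

```swift
import SotoSageMaker

// Sketch: enable Amazon Q within a domain via the new AmazonQSettings shape.
// The domain's AuthMode must be IAM Identity Center (SSO) for this to take effect.
// The Q profile ARN below is a placeholder.
let amazonQ = SageMaker.AmazonQSettings(
    qProfileArn: "arn:aws:codewhisperer:us-west-2:111122223333:profile/EXAMPLE",
    status: .enabled
)
let domainSettings = SageMaker.DomainSettings(amazonQSettings: amazonQ)
// domainSettings can then be passed along with a CreateDomain or UpdateDomain request.
```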
" + } + }, "com.amazonaws.sagemaker#AnnotationConsolidationConfig": { "type": "structure", "members": { @@ -7396,7 +7461,7 @@ } }, "traits": { - "smithy.api#documentation": "

A Git repository that SageMaker automatically displays to users for cloning in the JupyterServer application.

" + "smithy.api#documentation": "

A Git repository that SageMaker automatically displays to users for cloning in the\n JupyterServer application.

" } }, "com.amazonaws.sagemaker#CodeRepositoryArn": { @@ -8040,6 +8105,12 @@ "smithy.api#documentation": "

Specifies the location of ML model data to deploy.

Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker Marketplace.
" } }, + "AdditionalModelDataSources": { + "target": "com.amazonaws.sagemaker#AdditionalModelDataSources", + "traits": { + "smithy.api#documentation": "

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

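A hedged sketch of how the new member might be populated from Swift once the SotoSageMaker shapes are regenerated; the S3ModelDataSource member and enum case names are assumed from the existing generated shapes and may differ, and the channel name, S3 URI, and image URI are placeholders:

```swift
import SotoSageMaker

// Sketch: attach an extra model data source alongside the primary ModelDataSource
// on a container definition. Channel name, S3 URI, and image URI are placeholders.
let adapterSource = SageMaker.AdditionalModelDataSource(
    channelName: "adapter-weights",
    s3DataSource: SageMaker.S3ModelDataSource(
        compressionType: .gzip,
        s3DataType: .s3Object,
        s3Uri: "s3://example-bucket/adapters/adapter.tar.gz"
    )
)
let container = SageMaker.ContainerDefinition(
    additionalModelDataSources: [adapterSource],
    image: "111122223333.dkr.ecr.us-west-2.amazonaws.com/example-inference:latest"
)
// The container definition would then be used in a CreateModel request.
```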
" + } + }, "Environment": { "target": "com.amazonaws.sagemaker#EnvironmentMap", "traits": { @@ -8599,7 +8670,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active simultaneously.

" + "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active\n simultaneously.

" } }, "com.amazonaws.sagemaker#CreateAppImageConfig": { @@ -8687,13 +8758,13 @@ "UserProfileName": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be set.

" + "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be\n set.

" } }, "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName\n must be set.

" + "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName must be\n set.

" } }, "AppType": { @@ -8715,13 +8786,13 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "

Each tag consists of a key and an optional value.\n Tag keys must be unique per resource.

" + "smithy.api#documentation": "

Each tag consists of a key and an optional value. Tag keys must be unique per\n resource.

" } }, "ResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error.
" + "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error.
" } } }, @@ -9549,7 +9620,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.
  • VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.
    When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections.

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully.

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" + "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.
  • VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.
    When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections.

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully.

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -9610,7 +9681,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access
  • VpcOnly - All traffic is through the specified VPC and subnets
" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access
  • VpcOnly - All traffic is through the specified VPC and subnets
" } }, "HomeEfsFileSystemKmsKeyId": { @@ -12427,6 +12498,123 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#CreateOptimizationJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateOptimizationJobRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreateOptimizationJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + }, + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.

For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.

" + } + }, + "com.amazonaws.sagemaker#CreateOptimizationJobRequest": { + "type": "structure", + "members": { + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A custom name for the new optimization job.

", + "smithy.api#required": {} + } + }, + "RoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model optimization, Amazon SageMaker needs your permission to:

  • Read input data from an S3 bucket
  • Write model artifacts to an S3 bucket
  • Write logs to Amazon CloudWatch Logs
  • Publish metrics to Amazon CloudWatch

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.

", + "smithy.api#required": {} + } + }, + "ModelSource": { + "target": "com.amazonaws.sagemaker#OptimizationJobModelSource", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The location of the source model to optimize with an optimization job.

", + "smithy.api#required": {} + } + }, + "DeploymentInstanceType": { + "target": "com.amazonaws.sagemaker#OptimizationJobDeploymentInstanceType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The type of instance that hosts the optimized model that you create with the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationEnvironment": { + "target": "com.amazonaws.sagemaker#OptimizationJobEnvironmentVariables", + "traits": { + "smithy.api#documentation": "

The environment variables to set in the model container.

" + } + }, + "OptimizationConfigs": { + "target": "com.amazonaws.sagemaker#OptimizationConfigs", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Settings for each of the optimization techniques that the job applies.

", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.sagemaker#OptimizationJobOutputConfig", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Details for where to store the optimized model that you create with the optimization job.

", + "smithy.api#required": {} + } + }, + "StoppingCondition": { + "target": "com.amazonaws.sagemaker#StoppingCondition", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

A list of key-value pairs associated with the optimization job. For more information,\n see Tagging Amazon Web Services resources in the Amazon Web Services General Reference\n Guide.

" + } + }, + "VpcConfig": { + "target": "com.amazonaws.sagemaker#OptimizationVpcConfig", + "traits": { + "smithy.api#documentation": "

A VPC in Amazon VPC that your optimized model has access to.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#CreateOptimizationJobResponse": { + "type": "structure", + "members": { + "OptimizationJobArn": { + "target": "com.amazonaws.sagemaker#OptimizationJobArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the optimization job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#CreatePipeline": { "type": "operation", "input": { @@ -12547,7 +12735,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint.

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.
" + "smithy.api#documentation": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint.

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.
" } }, "com.amazonaws.sagemaker#CreatePresignedDomainUrlRequest": { @@ -12578,7 +12766,7 @@ "ExpiresInSeconds": { "target": "com.amazonaws.sagemaker#ExpiresInSeconds", "traits": { - "smithy.api#documentation": "

The number of seconds until the pre-signed URL expires. This value defaults to\n 300.

" + "smithy.api#documentation": "

The number of seconds until the pre-signed URL expires. This value defaults to 300.

" } }, "SpaceName": { @@ -12590,7 +12778,7 @@ "LandingUri": { "target": "com.amazonaws.sagemaker#LandingUri", "traits": { - "smithy.api#documentation": "

The landing page that the user is directed to when accessing the presigned URL. Using this value, users can access Studio or Studio Classic, even if it is not the default experience for the domain. The supported values are:

  • studio::relative/path: Directs users to the relative path in Studio.
  • app:JupyterServer:relative/path: Directs users to the relative path in the Studio Classic application.
  • app:JupyterLab:relative/path: Directs users to the relative path in the JupyterLab application.
  • app:RStudioServerPro:relative/path: Directs users to the relative path in the RStudio application.
  • app:CodeEditor:relative/path: Directs users to the relative path in the Code Editor, based on Code-OSS, Visual Studio Code - Open Source application.
  • app:Canvas:relative/path: Directs users to the relative path in the Canvas application.
" + "smithy.api#documentation": "

The landing page that the user is directed to when accessing the presigned URL. Using this value, users can access Studio or Studio Classic, even if it is not the default experience for the domain. The supported values are:

  • studio::relative/path: Directs users to the relative path in Studio.
  • app:JupyterServer:relative/path: Directs users to the relative path in the Studio Classic application.
  • app:JupyterLab:relative/path: Directs users to the relative path in the JupyterLab application.
  • app:RStudioServerPro:relative/path: Directs users to the relative path in the RStudio application.
  • app:CodeEditor:relative/path: Directs users to the relative path in the Code Editor, based on Code-OSS, Visual Studio Code - Open Source application.
  • app:Canvas:relative/path: Directs users to the relative path in the Canvas application.
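
The values listed above map directly onto the LandingUri member of Soto's CreatePresignedDomainUrl request. A hedged sketch; member names are assumed from the generated request shape, and the domain ID, user profile name, and relative path are placeholders:

```swift
import SotoSageMaker

// Sketch: request a presigned URL that lands in JupyterLab rather than the
// domain's default experience. Identifiers are placeholders.
func presignedJupyterLabURL(using sagemaker: SageMaker) async throws -> String? {
    let response = try await sagemaker.createPresignedDomainUrl(
        .init(
            domainId: "d-exampledomain1",
            expiresInSeconds: 300,  // documented default above
            landingUri: "app:JupyterLab:workspaces/example",
            userProfileName: "example-user"
        )
    )
    return response.authorizedUrl
}
```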
" } } }, @@ -12956,7 +13144,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "

Tags to associated with the space. Each tag consists of a key and an optional value.\n Tag keys must be unique for each resource. Tags are searchable using the\n Search API.

" + "smithy.api#documentation": "

Tags to associated with the space. Each tag consists of a key and an optional value. Tag\n keys must be unique for each resource. Tags are searchable using the Search\n API.

" } }, "SpaceSettings": { @@ -13034,7 +13222,7 @@ "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded.

", + "smithy.api#documentation": "

The content of your Amazon SageMaker Studio Lifecycle Configuration script. This\n content must be base64 encoded.

", "smithy.api#required": {} } }, @@ -13049,7 +13237,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "

Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

" + "smithy.api#documentation": "

Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an\n optional value. Tag keys must be unique per resource. Tags are searchable using the Search\n API.

" } } }, @@ -13594,7 +13782,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a user profile. A user profile represents a single user within a domain, and is\n the main way to reference a \"person\" for the purposes of sharing, reporting, and other\n user-oriented features. This entity is created when a user onboards to a domain. If an\n administrator invites a person by email or imports them from IAM Identity Center, a user profile is\n automatically created. A user profile is the primary holder of settings for an individual\n user and has a reference to the user's private Amazon Elastic File System home directory.\n

" + "smithy.api#documentation": "

Creates a user profile. A user profile represents a single user within a domain, and is\n the main way to reference a \"person\" for the purposes of sharing, reporting, and other\n user-oriented features. This entity is created when a user onboards to a domain. If an\n administrator invites a person by email or imports them from IAM Identity Center, a user\n profile is automatically created. A user profile is the primary holder of settings for an\n individual user and has a reference to the user's private Amazon Elastic File System home\n directory.

" } }, "com.amazonaws.sagemaker#CreateUserProfileRequest": { @@ -13619,19 +13807,19 @@ "SingleSignOnUserIdentifier": { "target": "com.amazonaws.sagemaker#SingleSignOnUserIdentifier", "traits": { - "smithy.api#documentation": "

A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is \"UserName\".\n If the Domain's AuthMode is IAM Identity Center, this field is required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified.\n

" + "smithy.api#documentation": "

A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only\n supported value is \"UserName\". If the Domain's AuthMode is IAM Identity Center, this field is\n required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified.\n

" } }, "SingleSignOnUserValue": { "target": "com.amazonaws.sagemaker#String256", "traits": { - "smithy.api#documentation": "

The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is\n required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified.\n

" + "smithy.api#documentation": "

The username of the associated Amazon Web Services Single Sign-On User for this\n UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is required, and must\n match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified.

" } }, "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

Tags that you specify for the User Profile are also added to all Apps that the User Profile launches.

" + "smithy.api#documentation": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

Tags that you specify for the User Profile are also added to all Apps that the User Profile launches.

" } }, "UserSettings": { @@ -14634,7 +14822,7 @@ "SecurityGroups": { "target": "com.amazonaws.sagemaker#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security group IDs for the Amazon VPC that the space uses for communication.

" + "smithy.api#documentation": "

The security group IDs for the Amazon VPC that the space uses for\n communication.

" } }, "JupyterServerAppSettings": { @@ -14655,7 +14843,7 @@ "CustomFileSystemConfigs": { "target": "com.amazonaws.sagemaker#CustomFileSystemConfigs", "traits": { - "smithy.api#documentation": "

The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker Studio.

" + "smithy.api#documentation": "

The settings for assigning a custom file system to a domain. Permitted users can access\n this file system in Amazon SageMaker Studio.

" } } }, @@ -14833,13 +15021,13 @@ "UserProfileName": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be set.

" + "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be\n set.

" } }, "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName\n must be set.

" + "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName must be\n set.

" } }, "AppType": { @@ -16280,6 +16468,39 @@ "smithy.api#input": {} } }, + "com.amazonaws.sagemaker#DeleteOptimizationJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteOptimizationJobRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an optimization job.

" + } + }, + "com.amazonaws.sagemaker#DeleteOptimizationJobRequest": { + "type": "structure", + "members": { + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name that you assigned to the optimization job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.sagemaker#DeletePipeline": { "type": "operation", "input": { @@ -16433,7 +16654,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles.

" + "smithy.api#documentation": "

Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the\n Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You\n must also remove the Lifecycle Configuration from UserSettings in all Domains and\n UserProfiles.

" } }, "com.amazonaws.sagemaker#DeleteStudioLifecycleConfigRequest": { @@ -16606,7 +16827,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS\n volume, including data, notebooks, and other artifacts.

" + "smithy.api#documentation": "

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS\n volume, including data, notebooks, and other artifacts.

" } }, "com.amazonaws.sagemaker#DeleteUserProfileRequest": { @@ -17295,7 +17516,7 @@ "UserProfileName": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be set.

" + "smithy.api#documentation": "

The user profile name. If this value is not set, then SpaceName must be\n set.

" } }, "SpaceName": { @@ -17361,7 +17582,7 @@ "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName\n must be set.

" + "smithy.api#documentation": "

The name of the space. If this value is not set, then UserProfileName must be\n set.

" } }, "Status": { @@ -17379,13 +17600,13 @@ "LastUserActivityTimestamp": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp.

" + "smithy.api#documentation": "

The timestamp of the last user's activity. LastUserActivityTimestamp is also\n updated when SageMaker performs health checks without user activity. As a result, this\n value is set to the same value as LastHealthCheckTimestamp.

" } }, "CreationTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The creation time of the application.

After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application.
" + "smithy.api#documentation": "

The creation time of the application.

After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application.
" } }, "FailureReason": { @@ -17397,7 +17618,7 @@ "ResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" + "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image\n created on the instance.

" } } }, @@ -18778,7 +18999,7 @@ "SingleSignOnApplicationArn": { "target": "com.amazonaws.sagemaker#SingleSignOnApplicationArn", "traits": { - "smithy.api#documentation": "

The ARN of the application managed by SageMaker in IAM Identity Center. This value is\n only returned for domains created after October 1, 2023.

" + "smithy.api#documentation": "

The ARN of the application managed by SageMaker in IAM Identity Center. This value\n is only returned for domains created after October 1, 2023.

" } }, "Status": { @@ -18832,7 +19053,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access
  • VpcOnly - All traffic is through the specified VPC and subnets
" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access
  • VpcOnly - All traffic is through the specified VPC and subnets
" } }, "HomeEfsFileSystemKmsKeyId": { @@ -23075,6 +23296,170 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#DescribeOptimizationJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DescribeOptimizationJobRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#DescribeOptimizationJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Provides the properties of the specified optimization job.

" + } + }, + "com.amazonaws.sagemaker#DescribeOptimizationJobRequest": { + "type": "structure", + "members": { + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name that you assigned to the optimization job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#DescribeOptimizationJobResponse": { + "type": "structure", + "members": { + "OptimizationJobArn": { + "target": "com.amazonaws.sagemaker#OptimizationJobArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationJobStatus": { + "target": "com.amazonaws.sagemaker#OptimizationJobStatus", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The current status of the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationStartTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the optimization job started.

" + } + }, + "OptimizationEndTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the optimization job finished processing.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#CreationTime", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The time when you created the optimization job.

", + "smithy.api#required": {} + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#LastModifiedTime", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The time when the optimization job was last updated.

", + "smithy.api#required": {} + } + }, + "FailureReason": { + "target": "com.amazonaws.sagemaker#FailureReason", + "traits": { + "smithy.api#documentation": "

If the optimization job status is FAILED, the reason for the\n failure.

" + } + }, + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name that you assigned to the optimization job.

", + "smithy.api#required": {} + } + }, + "ModelSource": { + "target": "com.amazonaws.sagemaker#OptimizationJobModelSource", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The location of the source model to optimize with an optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationEnvironment": { + "target": "com.amazonaws.sagemaker#OptimizationJobEnvironmentVariables", + "traits": { + "smithy.api#documentation": "

The environment variables to set in the model container.

" + } + }, + "DeploymentInstanceType": { + "target": "com.amazonaws.sagemaker#OptimizationJobDeploymentInstanceType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The type of instance that hosts the optimized model that you create with the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationConfigs": { + "target": "com.amazonaws.sagemaker#OptimizationConfigs", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Settings for each of the optimization techniques that the job applies.

", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.sagemaker#OptimizationJobOutputConfig", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Details for where to store the optimized model that you create with the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationOutput": { + "target": "com.amazonaws.sagemaker#OptimizationOutput", + "traits": { + "smithy.api#documentation": "

Output values produced by an optimization job.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the IAM role that you assigned to the optimization job.

", + "smithy.api#required": {} + } + }, + "StoppingCondition": { + "target": "com.amazonaws.sagemaker#StoppingCondition", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#required": {} + } + }, + "VpcConfig": { + "target": "com.amazonaws.sagemaker#OptimizationVpcConfig", + "traits": { + "smithy.api#documentation": "

A VPC in Amazon VPC that your optimized model has access to.

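A hedged sketch of how the new optimization-job operations could be driven from Soto once generated; only members present in the model above are used, the status case names are assumptions about the usual enum generation, and the job name is a placeholder:

```swift
import SotoSageMaker

// Sketch: inspect an optimization job by name and clean it up if it failed.
// Status case names (.failed, etc.) are assumptions about the generated enum.
func inspectOptimizationJob(named name: String, using sagemaker: SageMaker) async throws {
    let job = try await sagemaker.describeOptimizationJob(.init(optimizationJobName: name))
    print("\(job.optimizationJobName): \(job.optimizationJobStatus)")

    if job.optimizationJobStatus == .failed {
        print("failure reason: \(job.failureReason ?? "not reported")")
        try await sagemaker.deleteOptimizationJob(.init(optimizationJobName: name))
    }
}
```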
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#DescribePipeline": { "type": "operation", "input": { @@ -23803,7 +24188,7 @@ "Url": { "target": "com.amazonaws.sagemaker#String1024", "traits": { - "smithy.api#documentation": "

Returns the URL of the space. If the space is created with Amazon Web Services IAM Identity Center (Successor to Amazon Web Services Single Sign-On) authentication, users can navigate to the URL after appending the respective redirect parameter for the application type to be federated through Amazon Web Services IAM Identity Center.

The following application types are supported:

  • Studio Classic: &redirect=JupyterServer
  • JupyterLab: &redirect=JupyterLab
  • Code Editor, based on Code-OSS, Visual Studio Code - Open Source: &redirect=CodeEditor
" + "smithy.api#documentation": "

Returns the URL of the space. If the space is created with Amazon Web Services IAM Identity Center (Successor to Amazon Web Services Single Sign-On) authentication, users can navigate to the URL after appending the respective redirect parameter for the application type to be federated through Amazon Web Services IAM Identity Center.

The following application types are supported:

  • Studio Classic: &redirect=JupyterServer
  • JupyterLab: &redirect=JupyterLab
  • Code Editor, based on Code-OSS, Visual Studio Code - Open Source: &redirect=CodeEditor
" } } }, @@ -23856,7 +24241,7 @@ "StudioLifecycleConfigName": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration that is described.

" + "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration that is\n described.

" } }, "CreationTime": { @@ -23868,7 +24253,7 @@ "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable.

" + "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle\n Configurations are immutable.

" } }, "StudioLifecycleConfigContent": { @@ -25672,7 +26057,7 @@ "VpcOnlyTrustedAccounts": { "target": "com.amazonaws.sagemaker#VpcOnlyTrustedAccounts", "traits": { - "smithy.api#documentation": "

The list of Amazon Web Services accounts that are trusted when the domain is created in VPC-only mode.

" + "smithy.api#documentation": "

The list of Amazon Web Services accounts that are trusted when the domain is created in\n VPC-only mode.

" } } }, @@ -25803,7 +26188,7 @@ "SecurityGroupIds": { "target": "com.amazonaws.sagemaker#DomainSecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication\n between Domain-level apps and user apps.

" + "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud that the Domain uses for\n communication between Domain-level apps and user apps.

" } }, "RStudioServerProDomainSettings": { @@ -25815,7 +26200,7 @@ "ExecutionRoleIdentityConfig": { "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig", "traits": { - "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key.

" + "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution\n role as a sts:SourceIdentity key.

" } }, "DockerSettings": { @@ -25823,6 +26208,12 @@ "traits": { "smithy.api#documentation": "

A collection of settings that configure the domain's Docker interaction.

" } + }, + "AmazonQSettings": { + "target": "com.amazonaws.sagemaker#AmazonQSettings", + "traits": { + "smithy.api#documentation": "

A collection of settings that configure the Amazon Q experience within the domain. The\n AuthMode that you use to create the domain must be SSO.

" + } } }, "traits": { @@ -25841,13 +26232,13 @@ "ExecutionRoleIdentityConfig": { "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig", "traits": { - "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no\n apps in the InService or Pending state.

" + "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution\n role as a sts:SourceIdentity key. This configuration can only be modified if there are no\n apps in the InService or Pending state.

" } }, "SecurityGroupIds": { "target": "com.amazonaws.sagemaker#DomainSecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication\n between Domain-level apps and user apps.

" + "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud that the Domain uses for\n communication between Domain-level apps and user apps.

" } }, "DockerSettings": { @@ -25855,6 +26246,12 @@ "traits": { "smithy.api#documentation": "

A collection of settings that configure the domain's Docker interaction.

" } + }, + "AmazonQSettings": { + "target": "com.amazonaws.sagemaker#AmazonQSettings", + "traits": { + "smithy.api#documentation": "

A collection of settings that configure the Amazon Q experience within the domain.

" + } } }, "traits": { @@ -34710,19 +35107,19 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required.

" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the\n LifecycleConfigArns parameter, then this parameter is also required.

" } }, "LifecycleConfigArns": { "target": "com.amazonaws.sagemaker#LifecycleConfigArns", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the DefaultResourceSpec parameter is also required.

\n \n

To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the\n JupyterServerApp. If you use this parameter, the DefaultResourceSpec parameter is\n also required.

\n \n

To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty\n list.

\n
" } }, "CodeRepositories": { "target": "com.amazonaws.sagemaker#CodeRepositories", "traits": { - "smithy.api#documentation": "

A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application.

" + "smithy.api#documentation": "

A list of Git repositories that SageMaker automatically displays to users for\n cloning in the JupyterServer application.

" } } }, @@ -34769,19 +35166,19 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

\n \n

The Amazon SageMaker Studio UI does not use the default instance type value set here. The\n default instance type set here is used when Apps are created using the CLI or\n CloudFormation and the instance type parameter value is not passed.

\n
" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

\n \n

The Amazon SageMaker Studio UI does not use the default instance type value set\n here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not\n passed.

\n
" } }, "CustomImages": { "target": "com.amazonaws.sagemaker#CustomImages", "traits": { - "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a KernelGateway app.

" + "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a KernelGateway\n app.

" } }, "LifecycleConfigArns": { "target": "com.amazonaws.sagemaker#LifecycleConfigArns", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.

\n \n

To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user\n profile or domain.

\n \n

To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty\n list.

\n
" } } }, @@ -35985,13 +36382,13 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token.\n Use it in your next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } }, "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single\n response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken is provided in the\n response. The NextToken indicates that the user should get the next set of\n results by providing this token as a part of a subsequent call. The default value for\n MaxResults is 10.
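For reference, the NextToken contract described here reduces to a fetch-until-no-token loop. The following is a minimal, self-contained Swift sketch; the fetchPage closure and its tuple result are placeholders standing in for any of these paginated List calls, not generated SDK API.

```swift
// Generic NextToken loop: keep requesting pages until the service stops returning a token.
// `fetchPage` is a placeholder for any paginated List call in this model.
func collectAllPages<Item>(
    maxResults: Int = 10,  // the documented default for MaxResults
    fetchPage: (_ nextToken: String?, _ maxResults: Int) async throws -> (items: [Item], nextToken: String?)
) async throws -> [Item] {
    var all: [Item] = []
    var token: String? = nil
    repeat {
        let page = try await fetchPage(token, maxResults)
        all.append(contentsOf: page.items)
        token = page.nextToken  // pass this back on the next call to resume pagination
    } while token != nil
    return all
}
```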

" } }, "SortOrder": { @@ -36015,13 +36412,13 @@ "UserProfileNameEquals": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "

A parameter to search by user profile name. If SpaceNameEquals is set, then this value cannot be set.

" + "smithy.api#documentation": "

A parameter to search by user profile name. If SpaceNameEquals is set, then\n this value cannot be set.

" } }, "SpaceNameEquals": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "

A parameter to search by space name. If UserProfileNameEquals is set,\n then this value cannot be set.

" + "smithy.api#documentation": "

A parameter to search by space name. If UserProfileNameEquals is set, then\n this value cannot be set.

" } } }, @@ -36041,7 +36438,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token.\n Use it in your next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } } }, @@ -37316,7 +37713,7 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single\n response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken is provided in the\n response. The NextToken indicates that the user should get the next set of\n results by providing this token as a part of a subsequent call. The default value for\n MaxResults is 10.

" } } }, @@ -41298,6 +41695,143 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#ListOptimizationJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#ListOptimizationJobsRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#ListOptimizationJobsResponse" + }, + "traits": { + "smithy.api#documentation": "

Lists the optimization jobs in your account and their properties.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "OptimizationJobSummaries", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.sagemaker#ListOptimizationJobsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

A token that you use to get the next set of results following a truncated response. If\n the response to the previous request was truncated, that response provides the value for\n this token.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.sagemaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of optimization jobs to return in the response. The default is\n 50.

" + } + }, + "CreationTimeAfter": { + "target": "com.amazonaws.sagemaker#CreationTime", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs that were created after the\n specified time.

" + } + }, + "CreationTimeBefore": { + "target": "com.amazonaws.sagemaker#CreationTime", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs that were created before the\n specified time.

" + } + }, + "LastModifiedTimeAfter": { + "target": "com.amazonaws.sagemaker#LastModifiedTime", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs that were updated after the\n specified time.

" + } + }, + "LastModifiedTimeBefore": { + "target": "com.amazonaws.sagemaker#LastModifiedTime", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs that were updated before the\n specified time.

" + } + }, + "OptimizationContains": { + "target": "com.amazonaws.sagemaker#NameContains", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs that apply the specified\n optimization techniques. You can specify either Quantization or\n Compilation.

" + } + }, + "NameContains": { + "target": "com.amazonaws.sagemaker#NameContains", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs with a name that contains the\n specified string.

" + } + }, + "StatusEquals": { + "target": "com.amazonaws.sagemaker#OptimizationJobStatus", + "traits": { + "smithy.api#documentation": "

Filters the results to only those optimization jobs with the specified status.

" + } + }, + "SortBy": { + "target": "com.amazonaws.sagemaker#ListOptimizationJobsSortBy", + "traits": { + "smithy.api#documentation": "

The field by which to sort the optimization jobs in the response. The default is\n CreationTime\n

" + } + }, + "SortOrder": { + "target": "com.amazonaws.sagemaker#SortOrder", + "traits": { + "smithy.api#documentation": "

The sort order for results. The default is Ascending\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#ListOptimizationJobsResponse": { + "type": "structure", + "members": { + "OptimizationJobSummaries": { + "target": "com.amazonaws.sagemaker#OptimizationJobSummaries", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A list of optimization jobs and their properties that matches any of the filters you\n specified in the request.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use in a subsequent request to get the next set of results following a\n truncated response.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.sagemaker#ListOptimizationJobsSortBy": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Name" + } + }, + "CREATION_TIME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CreationTime" + } + }, + "STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Status" + } + } + } + }, "com.amazonaws.sagemaker#ListPipelineExecutionSteps": { "type": "operation", "input": { @@ -41936,13 +42470,13 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your\n next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } }, "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single\n response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken is provided in the\n response. The NextToken indicates that the user should get the next set of\n results by providing this token as a part of a subsequent call. The default value for\n MaxResults is 10.

" } }, "SortOrder": { @@ -41986,7 +42520,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your\n next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } } }, @@ -42090,7 +42624,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services Account.

", + "smithy.api#documentation": "

Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services\n Account.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -42105,49 +42639,49 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

The total number of items to return in the response. If the total\n number of items available is more than the value specified, a NextToken\n is provided in the response. To resume pagination, provide the NextToken\n value in the as part of a subsequent call. The default value is 10.

" + "smithy.api#documentation": "

The total number of items to return in the response. If the total number of items\n available is more than the value specified, a NextToken is provided in the\n response. To resume pagination, provide the NextToken value as part of a\n subsequent call. The default value is 10.

" } }, "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous call to ListStudioLifecycleConfigs didn't return the full set of Lifecycle Configurations, the call returns a token for getting the next set of Lifecycle Configurations.

" + "smithy.api#documentation": "

If the previous call to ListStudioLifecycleConfigs didn't return the full set of Lifecycle\n Configurations, the call returns a token for getting the next set of Lifecycle\n Configurations.

" } }, "NameContains": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { - "smithy.api#documentation": "

A string in the Lifecycle Configuration name. This filter returns only Lifecycle Configurations whose name contains the specified string.

" + "smithy.api#documentation": "

A string in the Lifecycle Configuration name. This filter returns only Lifecycle\n Configurations whose name contains the specified string.

" } }, "AppTypeEquals": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigAppType", "traits": { - "smithy.api#documentation": "

A parameter to search for the App Type to which the Lifecycle Configuration is attached.

" + "smithy.api#documentation": "

A parameter to search for the App Type to which the Lifecycle Configuration is\n attached.

" } }, "CreationTimeBefore": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or before the specified time.

" + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or before the specified\n time.

" } }, "CreationTimeAfter": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or after the specified time.

" + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or after the specified\n time.

" } }, "ModifiedTimeBefore": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified before the specified time.

" + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified before the specified\n time.

" } }, "ModifiedTimeAfter": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified after the specified time.

" + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified after the specified\n time.

" } }, "SortBy": { @@ -42173,7 +42707,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token.\n Use it in your next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } }, "StudioLifecycleConfigs": { @@ -42874,13 +43408,13 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token.\n Use it in your next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } }, "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single\n response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken is provided in the\n response. The NextToken indicates that the user should get the next set of\n results by providing this token as a part of a subsequent call. The default value for\n MaxResults is 10.

" } }, "SortOrder": { @@ -42924,7 +43458,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the previous response was truncated, you will receive this token.\n Use it in your next request to receive the next set of results.

" + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next\n request to receive the next set of results.

" } } }, @@ -44517,6 +45051,26 @@ "smithy.api#documentation": "

Configures the timeout and maximum number of retries for processing a transform job\n invocation.

" } }, + "com.amazonaws.sagemaker#ModelCompilationConfig": { + "type": "structure", + "members": { + "Image": { + "target": "com.amazonaws.sagemaker#OptimizationContainerImage", + "traits": { + "smithy.api#documentation": "

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + } + }, + "OverrideEnvironment": { + "target": "com.amazonaws.sagemaker#OptimizationJobEnvironmentVariables", + "traits": { + "smithy.api#documentation": "

Environment variables that override the default ones in the model container.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for the model compilation technique that's applied by a model optimization job.

" + } + }, "com.amazonaws.sagemaker#ModelCompressionType": { "type": "enum", "members": { @@ -46149,6 +46703,26 @@ "smithy.api#documentation": "

The input for the model quality monitoring job. Currently endpoints are supported for\n input for model quality monitoring jobs.

" } }, + "com.amazonaws.sagemaker#ModelQuantizationConfig": { + "type": "structure", + "members": { + "Image": { + "target": "com.amazonaws.sagemaker#OptimizationContainerImage", + "traits": { + "smithy.api#documentation": "

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

" + } + }, + "OverrideEnvironment": { + "target": "com.amazonaws.sagemaker#OptimizationJobEnvironmentVariables", + "traits": { + "smithy.api#documentation": "

Environment variables that override the default ones in the model container.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for the model quantization technique that's applied by a model optimization job.

" + } + }, "com.amazonaws.sagemaker#ModelRegisterSettings": { "type": "structure", "members": { @@ -48680,6 +49254,521 @@ } } }, + "com.amazonaws.sagemaker#OptimizationConfig": { + "type": "union", + "members": { + "ModelQuantizationConfig": { + "target": "com.amazonaws.sagemaker#ModelQuantizationConfig", + "traits": { + "smithy.api#documentation": "

Settings for the model quantization technique that's applied by a model optimization job.

" + } + }, + "ModelCompilationConfig": { + "target": "com.amazonaws.sagemaker#ModelCompilationConfig", + "traits": { + "smithy.api#documentation": "

Settings for the model compilation technique that's applied by a model optimization job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for an optimization technique that you apply with a model optimization\n job.
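To make the union shape concrete: each OptimizationConfig entry carries exactly one technique, and OptimizationConfigs accepts up to 10 entries per job. The Swift below is an illustrative hand-written mirror, not the generated Soto type, and the environment-variable names in the example are hypothetical.

```swift
// Illustrative mirror of the OptimizationConfig union: one technique per entry.
enum OptimizationConfig {
    case modelQuantization(image: String?, overrideEnvironment: [String: String])
    case modelCompilation(image: String?, overrideEnvironment: [String: String])
}

// Example list (the model caps it at 10 entries): quantize first, then compile.
let optimizationConfigs: [OptimizationConfig] = [
    .modelQuantization(image: nil, overrideEnvironment: ["OPTION_QUANTIZE": "awq"]),  // hypothetical env key
    .modelCompilation(image: nil, overrideEnvironment: [:]),
]
```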

" + } + }, + "com.amazonaws.sagemaker#OptimizationConfigs": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#OptimizationConfig" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.sagemaker#OptimizationContainerImage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.sagemaker#OptimizationJobArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/" + } + }, + "com.amazonaws.sagemaker#OptimizationJobDeploymentInstanceType": { + "type": "enum", + "members": { + "ML_P4D_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.p4d.24xlarge" + } + }, + "ML_P4DE_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.p4de.24xlarge" + } + }, + "ML_P5_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.p5.48xlarge" + } + }, + "ML_G5_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.xlarge" + } + }, + "ML_G5_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.2xlarge" + } + }, + "ML_G5_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.4xlarge" + } + }, + "ML_G5_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.8xlarge" + } + }, + "ML_G5_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.12xlarge" + } + }, + "ML_G5_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.16xlarge" + } + }, + "ML_G5_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.24xlarge" + } + }, + "ML_G5_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.48xlarge" + } + }, + "ML_G6_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.xlarge" + } + }, + "ML_G6_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.2xlarge" + } + }, + "ML_G6_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.4xlarge" + } + }, + "ML_G6_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.8xlarge" + } + }, + "ML_G6_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.12xlarge" + } + }, + "ML_G6_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.16xlarge" + } + }, + "ML_G6_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.24xlarge" + } + }, + "ML_G6_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.48xlarge" + } + }, + "ML_INF2_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.inf2.xlarge" + } + }, + "ML_INF2_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.inf2.8xlarge" + } + }, + "ML_INF2_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.inf2.24xlarge" + } + }, + "ML_INF2_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.inf2.48xlarge" + } + }, + "ML_TRN1_2XLARGE": { + "target": "smithy.api#Unit", 
+ "traits": { + "smithy.api#enumValue": "ml.trn1.2xlarge" + } + }, + "ML_TRN1_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.32xlarge" + } + }, + "ML_TRN1N_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1n.32xlarge" + } + } + } + }, + "com.amazonaws.sagemaker#OptimizationJobEnvironmentVariables": { + "type": "map", + "key": { + "target": "com.amazonaws.sagemaker#NonEmptyString256" + }, + "value": { + "target": "com.amazonaws.sagemaker#String256" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 25 + } + } + }, + "com.amazonaws.sagemaker#OptimizationJobModelSource": { + "type": "structure", + "members": { + "S3": { + "target": "com.amazonaws.sagemaker#OptimizationJobModelSourceS3", + "traits": { + "smithy.api#documentation": "

The Amazon S3 location of a source model to optimize with an optimization job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The location of the source model to optimize with an optimization job.

" + } + }, + "com.amazonaws.sagemaker#OptimizationJobModelSourceS3": { + "type": "structure", + "members": { + "S3Uri": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#documentation": "

An Amazon S3 URI that locates a source model to optimize with an optimization job.

" + } + }, + "ModelAccessConfig": { + "target": "com.amazonaws.sagemaker#OptimizationModelAccessConfig", + "traits": { + "smithy.api#documentation": "

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon S3 location of a source model to optimize with an optimization job.

" + } + }, + "com.amazonaws.sagemaker#OptimizationJobOutputConfig": { + "type": "structure", + "members": { + "KmsKeyId": { + "target": "com.amazonaws.sagemaker#KmsKeyId", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses the key to encrypt the artifacts of the\n optimized model when SageMaker uploads the model to Amazon S3.

" + } + }, + "S3OutputLocation": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon S3 URI for where to store the optimized model that you create with an optimization\n job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details for where to store the optimized model that you create with the optimization job.

" + } + }, + "com.amazonaws.sagemaker#OptimizationJobStatus": { + "type": "enum", + "members": { + "INPROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INPROGRESS" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "STARTING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTING" + } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPING" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + } + } + }, + "com.amazonaws.sagemaker#OptimizationJobSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#OptimizationJobSummary" + } + }, + "com.amazonaws.sagemaker#OptimizationJobSummary": { + "type": "structure", + "members": { + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name that you assigned to the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationJobArn": { + "target": "com.amazonaws.sagemaker#OptimizationJobArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the optimization job.

", + "smithy.api#required": {} + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#CreationTime", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The time when you created the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationJobStatus": { + "target": "com.amazonaws.sagemaker#OptimizationJobStatus", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The current status of the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationStartTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the optimization job started.

" + } + }, + "OptimizationEndTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the optimization job finished processing.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#LastModifiedTime", + "traits": { + "smithy.api#documentation": "

The time when the optimization job was last updated.

" + } + }, + "DeploymentInstanceType": { + "target": "com.amazonaws.sagemaker#OptimizationJobDeploymentInstanceType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The type of instance that hosts the optimized model that you create with the optimization job.

", + "smithy.api#required": {} + } + }, + "OptimizationTypes": { + "target": "com.amazonaws.sagemaker#OptimizationTypes", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The optimization techniques that are applied by the optimization job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Summarizes an optimization job by providing some of its key properties.

" + } + }, + "com.amazonaws.sagemaker#OptimizationModelAcceptEula": { + "type": "boolean" + }, + "com.amazonaws.sagemaker#OptimizationModelAccessConfig": { + "type": "structure", + "members": { + "AcceptEula": { + "target": "com.amazonaws.sagemaker#OptimizationModelAcceptEula", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specifies agreement to the model end-user license agreement (EULA). The\n AcceptEula value must be explicitly defined as True in order\n to accept the EULA that this model requires. You are responsible for reviewing and\n complying with any applicable license terms and making sure they are acceptable for your\n use case before downloading or using a model.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).
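The nesting described above (an S3 source plus an explicit EULA acceptance) serializes as in the hand-written Codable sketch below; the real types are generated from this model, and the bucket and key shown are hypothetical.

```swift
import Foundation

// Hand-written mirror of OptimizationJobModelSourceS3 / OptimizationModelAccessConfig,
// only to show the wire shape and the explicit AcceptEula flag.
struct OptimizationModelAccessConfig: Codable {
    let acceptEula: Bool  // must be explicitly true to accept the source model's EULA
    enum CodingKeys: String, CodingKey { case acceptEula = "AcceptEula" }
}

struct OptimizationJobModelSourceS3: Codable {
    let s3Uri: String
    let modelAccessConfig: OptimizationModelAccessConfig?
    enum CodingKeys: String, CodingKey {
        case s3Uri = "S3Uri"
        case modelAccessConfig = "ModelAccessConfig"
    }
}

let source = OptimizationJobModelSourceS3(
    s3Uri: "s3://amzn-s3-demo-bucket/gated-model/model.tar.gz",  // hypothetical location
    modelAccessConfig: OptimizationModelAccessConfig(acceptEula: true)
)
let data = try! JSONEncoder().encode(source)
print(String(decoding: data, as: UTF8.self))  // e.g. {"S3Uri":"s3://...","ModelAccessConfig":{"AcceptEula":true}}
```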

" + } + }, + "com.amazonaws.sagemaker#OptimizationOutput": { + "type": "structure", + "members": { + "RecommendedInferenceImage": { + "target": "com.amazonaws.sagemaker#OptimizationContainerImage", + "traits": { + "smithy.api#documentation": "

The image that SageMaker recommends that you use to host the optimized model that you created\n with an optimization job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Output values produced by an optimization job.

" + } + }, + "com.amazonaws.sagemaker#OptimizationType": { + "type": "string" + }, + "com.amazonaws.sagemaker#OptimizationTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#OptimizationType" + } + }, + "com.amazonaws.sagemaker#OptimizationVpcConfig": { + "type": "structure", + "members": { + "SecurityGroupIds": { + "target": "com.amazonaws.sagemaker#OptimizationVpcSecurityGroupIds", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security\n groups for the VPC that is specified in the Subnets field.

", + "smithy.api#required": {} + } + }, + "Subnets": { + "target": "com.amazonaws.sagemaker#OptimizationVpcSubnets", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the subnets in the VPC to which you want to connect your optimized\n model.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A VPC in Amazon VPC that's accessible to an optimized model that you create with an optimization\n job. You can control access to and from your resources by configuring a VPC. For more\n information, see Give SageMaker Access to Resources in your Amazon VPC.

" + } + }, + "com.amazonaws.sagemaker#OptimizationVpcSecurityGroupId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 32 + }, + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" + } + }, + "com.amazonaws.sagemaker#OptimizationVpcSecurityGroupIds": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#OptimizationVpcSecurityGroupId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.sagemaker#OptimizationVpcSubnetId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 32 + }, + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" + } + }, + "com.amazonaws.sagemaker#OptimizationVpcSubnets": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#OptimizationVpcSubnetId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 16 + } + } + }, "com.amazonaws.sagemaker#OptionalDouble": { "type": "double" }, @@ -50423,6 +51512,102 @@ "traits": { "smithy.api#enumValue": "ml.g4dn.16xlarge" } + }, + "ML_G5_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.xlarge" + } + }, + "ML_G5_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.2xlarge" + } + }, + "ML_G5_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.4xlarge" + } + }, + "ML_G5_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.8xlarge" + } + }, + "ML_G5_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.16xlarge" + } + }, + "ML_G5_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.12xlarge" + } + }, + "ML_G5_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.24xlarge" + } + }, + "ML_G5_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g5.48xlarge" + } + }, + "ML_R5D_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.large" + } + }, + "ML_R5D_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.xlarge" + } + }, + "ML_R5D_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.2xlarge" + } + }, + "ML_R5D_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.4xlarge" + } + }, + "ML_R5D_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.8xlarge" + } + }, + "ML_R5D_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.12xlarge" + } + }, + "ML_R5D_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.16xlarge" + } + }, + "ML_R5D_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.24xlarge" + } } } }, @@ -53217,6 +54402,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#QProfileArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + } + }, "com.amazonaws.sagemaker#QualityCheckStepMetadata": { "type": "structure", "members": { @@ -53509,7 +54700,7 @@ "CustomImages": { "target": "com.amazonaws.sagemaker#CustomImages", "traits": { - "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a RSession app.

" + "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as an RSession\n app.

" } } }, @@ -53546,12 +54737,12 @@ "UserGroup": { "target": "com.amazonaws.sagemaker#RStudioServerProUserGroup", "traits": { - "smithy.api#documentation": "

The level of permissions that the user has within the RStudioServerPro\n app. This value defaults to `User`. The `Admin` value allows the user access to the\n RStudio Administrative Dashboard.

" + "smithy.api#documentation": "

The level of permissions that the user has within the RStudioServerPro app.\n This value defaults to `User`. The `Admin` value allows the user access to the RStudio\n Administrative Dashboard.

" } } }, "traits": { - "smithy.api#documentation": "

A collection of settings that configure user interaction with the\n RStudioServerPro app.

" + "smithy.api#documentation": "

A collection of settings that configure user interaction with the\n RStudioServerPro app.

" } }, "com.amazonaws.sagemaker#RStudioServerProDomainSettings": { @@ -53613,7 +54804,7 @@ } }, "traits": { - "smithy.api#documentation": "

A collection of settings that update the current configuration for the\n RStudioServerPro Domain-level app.

" + "smithy.api#documentation": "

A collection of settings that update the current configuration for the\n RStudioServerPro Domain-level app.

" } }, "com.amazonaws.sagemaker#RStudioServerProUserGroup": { @@ -55134,7 +56325,7 @@ "LifecycleConfigArn": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the\n Resource.

" } } }, @@ -55889,6 +57080,9 @@ { "target": "com.amazonaws.sagemaker#CreateNotebookInstanceLifecycleConfig" }, + { + "target": "com.amazonaws.sagemaker#CreateOptimizationJob" + }, { "target": "com.amazonaws.sagemaker#CreatePipeline" }, @@ -56057,6 +57251,9 @@ { "target": "com.amazonaws.sagemaker#DeleteNotebookInstanceLifecycleConfig" }, + { + "target": "com.amazonaws.sagemaker#DeleteOptimizationJob" + }, { "target": "com.amazonaws.sagemaker#DeletePipeline" }, @@ -56231,6 +57428,9 @@ { "target": "com.amazonaws.sagemaker#DescribeNotebookInstanceLifecycleConfig" }, + { + "target": "com.amazonaws.sagemaker#DescribeOptimizationJob" + }, { "target": "com.amazonaws.sagemaker#DescribePipeline" }, @@ -56474,6 +57674,9 @@ { "target": "com.amazonaws.sagemaker#ListNotebookInstances" }, + { + "target": "com.amazonaws.sagemaker#ListOptimizationJobs" + }, { "target": "com.amazonaws.sagemaker#ListPipelineExecutions" }, @@ -56609,6 +57812,9 @@ { "target": "com.amazonaws.sagemaker#StopNotebookInstance" }, + { + "target": "com.amazonaws.sagemaker#StopOptimizationJob" + }, { "target": "com.amazonaws.sagemaker#StopPipelineExecution" }, @@ -58789,24 +59995,24 @@ "NotebookOutputOption": { "target": "com.amazonaws.sagemaker#NotebookOutputOption", "traits": { - "smithy.api#documentation": "

Whether to include the notebook cell output when sharing the notebook. The default\n is Disabled.

" + "smithy.api#documentation": "

Whether to include the notebook cell output when sharing the notebook. The default is\n Disabled.

" } }, "S3OutputPath": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "

When NotebookOutputOption is Allowed, the Amazon S3 bucket used\n to store the shared notebook snapshots.

" + "smithy.api#documentation": "

When NotebookOutputOption is Allowed, the Amazon S3\n bucket used to store the shared notebook snapshots.

" } }, "S3KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

When NotebookOutputOption is Allowed, the Amazon Web Services Key Management Service (KMS)\n encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket.

" + "smithy.api#documentation": "

When NotebookOutputOption is Allowed, the Amazon Web Services Key\n Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the\n Amazon S3 bucket.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain\n API is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing\n isn't allowed.

" + "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain API\n is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing isn't\n allowed.

" } }, "com.amazonaws.sagemaker#SharingType": { @@ -60486,6 +61692,39 @@ "smithy.api#input": {} } }, + "com.amazonaws.sagemaker#StopOptimizationJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#StopOptimizationJobRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Ends a running inference optimization job.

" + } + }, + "com.amazonaws.sagemaker#StopOptimizationJobRequest": { + "type": "structure", + "members": { + "OptimizationJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name that you assigned to the optimization job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.sagemaker#StopPipelineExecution": { "type": "operation", "input": { @@ -60667,7 +61906,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies a limit to how long a model training job or model compilation job can run.\n It also specifies how long a managed spot training job has to complete. When the job\n reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap\n model training costs.

\n

To stop a training job, SageMaker sends the algorithm the SIGTERM signal,\n which delays job termination for 120 seconds. Algorithms can use this 120-second window\n to save the model artifacts, so the results of training are not lost.

\n

The training algorithms provided by SageMaker automatically save the intermediate results\n of a model training job when possible. This attempt to save artifacts is only a best\n effort case as model might not be in a state from which it can be saved. For example, if\n training has just started, the model might not be ready to save. When saved, this\n intermediate data is a valid model artifact. You can use it to create a model with\n CreateModel.

\n \n

The Neural Topic Model (NTM) currently does not support saving intermediate model\n artifacts. When training NTMs, make sure that the maximum runtime is sufficient for\n the training job to complete.

\n
" + "smithy.api#documentation": "

Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker\n ends the job. Use this API to cap costs.

\n

To stop a training job, SageMaker sends the algorithm the SIGTERM signal,\n which delays job termination for 120 seconds. Algorithms can use this 120-second window\n to save the model artifacts, so the results of training are not lost.

\n

The training algorithms provided by SageMaker automatically save the intermediate results\n of a model training job when possible. This attempt to save artifacts is only a best\n effort case as model might not be in a state from which it can be saved. For example, if\n training has just started, the model might not be ready to save. When saved, this\n intermediate data is a valid model artifact. You can use it to create a model with\n CreateModel.

\n \n

The Neural Topic Model (NTM) currently does not support saving intermediate model\n artifacts. When training NTMs, make sure that the maximum runtime is sufficient for\n the training job to complete.

\n
" } }, "com.amazonaws.sagemaker#StorageType": { @@ -60846,7 +62085,7 @@ "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable.

" + "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle\n Configurations are immutable.

" } }, "StudioLifecycleConfigAppType": { @@ -60928,12 +62167,12 @@ "HiddenAppTypes": { "target": "com.amazonaws.sagemaker#HiddenAppTypesList", "traits": { - "smithy.api#documentation": "

The Applications supported in Studio that are hidden from the Studio left navigation pane.

" + "smithy.api#documentation": "

The Applications supported in Studio that are hidden from the Studio left navigation\n pane.

" } } }, "traits": { - "smithy.api#documentation": "

Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level.

" + "smithy.api#documentation": "

Studio settings. If these settings are applied on a user level, they take priority over\n the settings applied on a domain level.

" } }, "com.amazonaws.sagemaker#SubnetId": { @@ -61689,7 +62928,7 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker\n image created on the instance.

" } } }, @@ -62988,6 +64227,126 @@ "traits": { "smithy.api#enumValue": "ml.c6i.32xlarge" } + }, + "ML_R5D_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.large" + } + }, + "ML_R5D_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.xlarge" + } + }, + "ML_R5D_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.2xlarge" + } + }, + "ML_R5D_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.4xlarge" + } + }, + "ML_R5D_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.8xlarge" + } + }, + "ML_R5D_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.12xlarge" + } + }, + "ML_R5D_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.16xlarge" + } + }, + "ML_R5D_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5d.24xlarge" + } + }, + "ML_T3_MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.t3.medium" + } + }, + "ML_T3_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.t3.large" + } + }, + "ML_T3_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.t3.xlarge" + } + }, + "ML_T3_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.t3.2xlarge" + } + }, + "ML_R5_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.large" + } + }, + "ML_R5_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.xlarge" + } + }, + "ML_R5_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.2xlarge" + } + }, + "ML_R5_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.4xlarge" + } + }, + "ML_R5_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.8xlarge" + } + }, + "ML_R5_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.12xlarge" + } + }, + "ML_R5_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.16xlarge" + } + }, + "ML_R5_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r5.24xlarge" + } } } }, @@ -66201,7 +67560,7 @@ "AppSecurityGroupManagement": { "target": "com.amazonaws.sagemaker#AppSecurityGroupManagement", "traits": { - "smithy.api#documentation": "

The entity that creates and manages the required security groups for inter-app\n communication in VPCOnly mode. Required when\n CreateDomain.AppNetworkAccessType is VPCOnly and\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided. If setting up the domain for use with RStudio, this value must be set to\n Service.

" + "smithy.api#documentation": "

The entity that creates and manages the required security groups for inter-app\n communication in VPCOnly mode. Required when\n CreateDomain.AppNetworkAccessType is VPCOnly and\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided. If setting up the domain for use with RStudio, this value must be set to\n Service.

" } }, "DefaultSpaceSettings": { @@ -66213,13 +67572,13 @@ "SubnetIds": { "target": "com.amazonaws.sagemaker#Subnets", "traits": { - "smithy.api#documentation": "

The VPC subnets that Studio uses for communication.

\n

If removing subnets, ensure there are no apps in the InService,\n Pending, or Deleting state.

" + "smithy.api#documentation": "

The VPC subnets that Studio uses for communication.

\n

If removing subnets, ensure there are no apps in the InService,\n Pending, or Deleting state.

" } }, "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by\n Amazon SageMaker, which allows direct internet access.

    \n
  • \n
  • \n

    \n VpcOnly - All Studio traffic is through the specified VPC and\n subnets.

    \n
  • \n
\n

This configuration can only be modified if there are no apps in the\n InService, Pending, or Deleting state. The\n configuration cannot be updated if\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already\n set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided as part of the same request.

" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access.

    \n
  • \n
  • \n

    \n VpcOnly - All Studio traffic is through the specified VPC and\n subnets.

    \n
  • \n
\n

This configuration can only be modified if there are no apps in the\n InService, Pending, or Deleting state. The\n configuration cannot be updated if\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already\n set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided as part of the same request.

" } } }, @@ -68502,7 +69861,7 @@ "SecurityGroups": { "target": "com.amazonaws.sagemaker#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.

\n

Optional when the CreateDomain.AppNetworkAccessType parameter is set to\n PublicInternetOnly.

\n

Required when the CreateDomain.AppNetworkAccessType parameter is set to\n VpcOnly, unless specified as part of the DefaultUserSettings for the domain.

\n

Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the\n number of security groups that you can specify is one less than the maximum number shown.

" + "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for\n communication.

\n

Optional when the CreateDomain.AppNetworkAccessType parameter is set to\n PublicInternetOnly.

\n

Required when the CreateDomain.AppNetworkAccessType parameter is set to\n VpcOnly, unless specified as part of the DefaultUserSettings for\n the domain.

\n

Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the\n maximum number shown.

" } }, "SharingSettings": { @@ -68532,7 +69891,7 @@ "RStudioServerProAppSettings": { "target": "com.amazonaws.sagemaker#RStudioServerProAppSettings", "traits": { - "smithy.api#documentation": "

A collection of settings that configure user interaction with the\n RStudioServerPro app.

" + "smithy.api#documentation": "

A collection of settings that configure user interaction with the\n RStudioServerPro app.

" } }, "RSessionAppSettings": { @@ -68568,13 +69927,13 @@ "DefaultLandingUri": { "target": "com.amazonaws.sagemaker#LandingUri", "traits": { - "smithy.api#documentation": "

The default experience that the user is directed to when accessing the domain. The supported values are:

\n
    \n
  • \n

    \n studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED.

    \n
  • \n
  • \n

    \n app:JupyterServer:: Indicates that Studio Classic is the default experience.

    \n
  • \n
" + "smithy.api#documentation": "

The default experience that the user is directed to when accessing the domain. The\n supported values are:

\n
    \n
  • \n

    \n studio::: Indicates that Studio is the default experience. This value can\n only be passed if StudioWebPortal is set to ENABLED.

    \n
  • \n
  • \n

    \n app:JupyterServer:: Indicates that Studio Classic is the default\n experience.

    \n
  • \n
" } }, "StudioWebPortal": { "target": "com.amazonaws.sagemaker#StudioWebPortal", "traits": { - "smithy.api#documentation": "

Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain.

" + "smithy.api#documentation": "

Whether the user can access Studio. If this value is set to DISABLED, the\n user cannot access Studio, even if that is the default experience for the domain.

" } }, "CustomPosixUserConfig": { @@ -68592,12 +69951,12 @@ "StudioWebPortalSettings": { "target": "com.amazonaws.sagemaker#StudioWebPortalSettings", "traits": { - "smithy.api#documentation": "

Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level.

" + "smithy.api#documentation": "

Studio settings. If these settings are applied on a user level, they take priority over\n the settings applied on a domain level.

" } } }, "traits": { - "smithy.api#documentation": "

A collection of settings that apply to users in a domain. These settings are\n specified when the CreateUserProfile API is called, and as DefaultUserSettings\n when the CreateDomain API is called.

\n

\n SecurityGroups is aggregated when specified in both calls. For all other\n settings in UserSettings, the values specified in CreateUserProfile\n take precedence over those specified in CreateDomain.

" + "smithy.api#documentation": "

A collection of settings that apply to users in a domain. These settings are specified\n when the CreateUserProfile API is called, and as DefaultUserSettings\n when the CreateDomain API is called.

\n

\n SecurityGroups is aggregated when specified in both calls. For all other\n settings in UserSettings, the values specified in CreateUserProfile\n take precedence over those specified in CreateDomain.

" } }, "com.amazonaws.sagemaker#UsersPerStep": { diff --git a/models/secrets-manager.json b/models/secrets-manager.json index 7a89a0443d..831a88fc17 100644 --- a/models/secrets-manager.json +++ b/models/secrets-manager.json @@ -360,7 +360,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new secret. A secret can be a password, a set of \n credentials such as a user name and password, an OAuth token, or other secret information \n that you store in an encrypted form in Secrets Manager. The secret also \n includes the connection information to access a database or other service, which Secrets Manager \n doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the\n important information needed to manage the secret.

\n

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.\n\n

\n

For information about creating a secret in the console, see Create a secret.

\n

To create a secret, you can provide the secret value to be encrypted in either the\n SecretString parameter or the SecretBinary parameter, but not both. \n If you include SecretString or SecretBinary\n then Secrets Manager creates an initial secret version and automatically attaches the staging\n label AWSCURRENT to it.

\n

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret,\n you must make sure the JSON you store in the SecretString matches the JSON structure of\n a database secret.

\n

If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key \n doesn't already exist in your account, then Secrets Manager creates it for you automatically. All\n users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. \n Creating aws/secretsmanager can result in a one-time significant delay in returning the \n result.

\n

If the secret is in a different Amazon Web Services account from the credentials calling the API, then \n you can't use aws/secretsmanager to encrypt the secret, and you must create \n and use a customer managed KMS key.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:CreateSecret. If you \n include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions.\n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

\n

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

", + "smithy.api#documentation": "

Creates a new secret. A secret can be a password, a set of \n credentials such as a user name and password, an OAuth token, or other secret information \n that you store in an encrypted form in Secrets Manager. The secret also \n includes the connection information to access a database or other service, which Secrets Manager \n doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the\n important information needed to manage the secret.

\n

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.\n\n

\n

For information about creating a secret in the console, see Create a secret.

\n

To create a secret, you can provide the secret value to be encrypted in either the\n SecretString parameter or the SecretBinary parameter, but not both. \n If you include SecretString or SecretBinary\n then Secrets Manager creates an initial secret version and automatically attaches the staging\n label AWSCURRENT to it.

\n

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret,\n you must make sure the JSON you store in the SecretString matches the JSON structure of\n a database secret.

\n

If you don't specify a KMS encryption key, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key \n doesn't already exist in your account, then Secrets Manager creates it for you automatically. All\n users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. \n Creating aws/secretsmanager can result in a one-time significant delay in returning the \n result.

\n

If the secret is in a different Amazon Web Services account from the credentials calling the API, then \n you can't use aws/secretsmanager to encrypt the secret, and you must create \n and use a customer managed KMS key.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:CreateSecret. If you \n include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions.\n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

\n

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

\n \n

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.
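For illustration, a minimal Soto sketch of the call described here, passing only SecretString so that Secrets Manager creates the initial version and stages it as AWSCURRENT; the secret name, description, and credential JSON are placeholders, and the surrounding AWSClient is assumed to exist:

import SotoSecretsManager

// Sketch: SecretString and SecretBinary are mutually exclusive; for database credentials
// that will be rotated, the JSON must match the documented database-secret structure.
func createDatabaseSecret(client: AWSClient) async throws -> String? {
    let secretsManager = SecretsManager(client: client, region: .uswest2)
    let response = try await secretsManager.createSecret(.init(
        description: "Credentials for the example database",         // placeholder
        name: "MyTestDatabaseSecret",                                 // placeholder
        secretString: #"{"username":"admin","password":"EXAMPLE"}"#  // placeholder; never log real values
    ))
    return response.arn
}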

\n
", "smithy.api#examples": [ { "title": "To create a basic secret", @@ -1901,7 +1901,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new version with a new encrypted secret value and attaches it to the secret. The \n version can contain a new SecretString value or a new SecretBinary value.

\n

We recommend you avoid calling PutSecretValue at a sustained rate of more than \n once every 10 minutes. When you update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you call PutSecretValue more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

You can specify the staging labels to attach to the new version in VersionStages. \n If you don't include VersionStages, then Secrets Manager automatically\n moves the staging label AWSCURRENT to this version. If this operation creates \n the first version for the secret, then Secrets Manager\n automatically attaches the staging label AWSCURRENT to it. \n If this operation moves the staging label AWSCURRENT from another version to this\n version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to\n the version that AWSCURRENT was removed from.

\n

This operation is idempotent. If you call this operation with a ClientRequestToken \n that matches an existing version's VersionId, and you specify the\n same secret data, the operation succeeds but does nothing. However, if the secret data is\n different, then the operation fails because you can't modify an existing version; you can\n only create new ones.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:PutSecretValue. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

", + "smithy.api#documentation": "

Creates a new version with a new encrypted secret value and attaches it to the secret. The \n version can contain a new SecretString value or a new SecretBinary value.

\n

We recommend you avoid calling PutSecretValue at a sustained rate of more than \n once every 10 minutes. When you update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you call PutSecretValue more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

You can specify the staging labels to attach to the new version in VersionStages. \n If you don't include VersionStages, then Secrets Manager automatically\n moves the staging label AWSCURRENT to this version. If this operation creates \n the first version for the secret, then Secrets Manager\n automatically attaches the staging label AWSCURRENT to it. \n If this operation moves the staging label AWSCURRENT from another version to this\n version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to\n the version that AWSCURRENT was removed from.

\n

This operation is idempotent. If you call this operation with a ClientRequestToken \n that matches an existing version's VersionId, and you specify the\n same secret data, the operation succeeds but does nothing. However, if the secret data is\n different, then the operation fails because you can't modify an existing version; you can\n only create new ones.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:PutSecretValue. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

\n \n

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.
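A hedged Soto sketch of the versioning behavior described above (the secret ID and value are placeholders; an existing SecretsManager service object is assumed):

import SotoSecretsManager

// Sketch: write a new secret version; omitting VersionStages lets Secrets Manager move
// AWSCURRENT to this version and AWSPREVIOUS to the one it came from. Keep calls to
// less than once every 10 minutes per secret to stay under the version quota.
func putNewSecretValue(secretsManager: SecretsManager) async throws {
    _ = try await secretsManager.putSecretValue(.init(
        secretId: "MyTestDatabaseSecret",                                // placeholder name or ARN
        secretString: #"{"username":"admin","password":"NEWEXAMPLE"}"#  // placeholder value
    ))
}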

\n
", "smithy.api#examples": [ { "title": "To store a secret value in a new version of a secret", @@ -3201,8 +3201,21 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

\n

To change the rotation configuration of a secret, use RotateSecret instead.

\n

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

\n

We recommend you avoid calling UpdateSecret at a sustained rate of more than \n once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you update the secret value more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

If you include SecretString or SecretBinary to create a new\n secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new\n version. Then it attaches the label AWSPREVIOUS\n to the version that AWSCURRENT was removed from.

\n

If you call this operation with a ClientRequestToken that matches an existing version's \n VersionId, the operation results in an error. You can't modify an existing \n version, you can only create a new version. To remove a version, remove all staging labels from it. See \n UpdateSecretVersionStage.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:UpdateSecret. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager. \n If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and \n kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information, see \n Secret encryption and decryption.

", + "smithy.api#documentation": "

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

\n

To change the rotation configuration of a secret, use RotateSecret instead.

\n

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

\n

We recommend you avoid calling UpdateSecret at a sustained rate of more than \n once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you update the secret value more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

If you include SecretString or SecretBinary to create a new\n secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new\n version. Then it attaches the label AWSPREVIOUS\n to the version that AWSCURRENT was removed from.

\n

If you call this operation with a ClientRequestToken that matches an existing version's \n VersionId, the operation results in an error. You can't modify an existing \n version, you can only create a new version. To remove a version, remove all staging labels from it. See \n UpdateSecretVersionStage.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:UpdateSecret. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager. \n If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and \n kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information, see \n Secret encryption and decryption.

\n \n

When you enter commands in a command shell, there is a risk of the command history being accessed or utilities having access to your command parameters. This is a concern if the command includes the value of a secret. Learn how to Mitigate the risks of using command-line tools to store Secrets Manager secrets.
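For illustration, a minimal Soto sketch of a metadata-only update (placeholders throughout; an existing SecretsManager service object is assumed):

import SotoSecretsManager

// Sketch: updating only the description leaves the secret value and version staging untouched;
// including SecretString or SecretBinary here would create a new version and move AWSCURRENT.
func updateSecretDescription(secretsManager: SecretsManager) async throws {
    _ = try await secretsManager.updateSecret(.init(
        description: "Updated description for the example secret",  // placeholder
        secretId: "MyTestDatabaseSecret"                             // placeholder name or ARN
    ))
}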

\n
", "smithy.api#examples": [ + { + "title": "To create a new version of the encrypted secret value", + "documentation": "The following example shows how to create a new version of the secret by updating the SecretString field. Alternatively, you can use the put-secret-value operation.", + "input": { + "SecretId": "MyTestDatabaseSecret", + "SecretString": "{JSON STRING WITH CREDENTIALS}" + }, + "output": { + "ARN": "aws:arn:secretsmanager:us-west-2:123456789012:secret:MyTestDatabaseSecret-a1b2c3", + "Name": "MyTestDatabaseSecret", + "VersionId": "EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE" + } + }, { "title": "To update the description of a secret", "documentation": "The following example shows how to modify the description of a secret.", @@ -3227,19 +3240,6 @@ "ARN": "arn:aws:secretsmanager:us-west-2:123456789012:secret:MyTestDatabaseSecret-a1b2c3", "Name": "MyTestDatabaseSecret" } - }, - { - "title": "To create a new version of the encrypted secret value", - "documentation": "The following example shows how to create a new version of the secret by updating the SecretString field. Alternatively, you can use the put-secret-value operation.", - "input": { - "SecretId": "MyTestDatabaseSecret", - "SecretString": "{JSON STRING WITH CREDENTIALS}" - }, - "output": { - "ARN": "aws:arn:secretsmanager:us-west-2:123456789012:secret:MyTestDatabaseSecret-a1b2c3", - "Name": "MyTestDatabaseSecret", - "VersionId": "EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE" - } } ] } @@ -3270,7 +3270,7 @@ "KmsKeyId": { "target": "com.amazonaws.secretsmanager#KmsKeyIdType", "traits": { - "smithy.api#documentation": "

The ARN, key ID, or alias of the KMS key that Secrets Manager \n uses to encrypt new secret versions as well as any existing versions with the staging labels \n AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version.

\n

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager.\n For more information, see About aliases.

\n

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager \n creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access \n to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time \n significant delay in returning the result.

\n \n

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this\n operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in\n a different account, then you must use a customer managed key and provide the ARN of that KMS key in\n this field. The user making the call must have permissions to both the secret and the KMS key in\n their respective accounts.

\n
" + "smithy.api#documentation": "

The ARN, key ID, or alias of the KMS key that Secrets Manager \n uses to encrypt new secret versions as well as any existing versions with the staging labels \n AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version.

\n

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager.\n For more information, see About aliases.

\n

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager \n creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access \n to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time \n significant delay in returning the result.

\n \n

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this\n operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in\n a different account, then you must use a customer managed key and provide the ARN of that KMS key in\n this field. The user making the call must have permissions to both the secret and the KMS key in\n their respective accounts.
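As a brief sketch of this field in Soto (the alias and secret ID are hypothetical; the same secretsManager value as in the earlier sketches is assumed):

// Sketch: re-point the secret at a customer managed key; an empty string would fall back to
// aws/secretsmanager. Without kms:Encrypt on the new key, existing versions are not re-encrypted.
_ = try await secretsManager.updateSecret(.init(
    kmsKeyId: "alias/my-app-secrets",   // hypothetical alias; ARN or key ID also accepted
    secretId: "MyTestDatabaseSecret"    // placeholder
))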

\n
" } }, "SecretBinary": { diff --git a/models/ses.json b/models/ses.json index 0585475f6b..7486984fef 100644 --- a/models/ses.json +++ b/models/ses.json @@ -3822,7 +3822,20 @@ "outputToken": "NextToken", "items": "Identities", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListIdentitiesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.ses#ListIdentitiesRequest": { @@ -6478,7 +6491,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6521,7 +6533,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6534,7 +6547,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6548,7 +6560,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6571,7 +6582,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6606,7 +6616,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6617,14 +6626,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6638,14 +6649,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -6654,11 +6663,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6669,14 +6678,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6690,7 +6701,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6710,7 +6720,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6721,14 +6730,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -6739,9 +6750,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/sns.json b/models/sns.json index b523df57d0..8d7701b251 100644 --- a/models/sns.json +++ b/models/sns.json @@ -3388,7 +3388,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "items": "Topics" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListTopicsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.sns#ListTopicsInput": { diff --git a/models/taxsettings.json b/models/taxsettings.json index 35348647de..91e913db66 100644 --- a/models/taxsettings.json +++ b/models/taxsettings.json @@ -2280,18 +2280,15 @@ "aws.auth#sigv4": { "name": "tax" }, + "aws.endpoints#standardPartitionalEndpoints": { + "endpointPatternType": "service_region_dnsSuffix" + }, "aws.protocols#restJson1": {}, "smithy.api#documentation": "

You can use the tax setting API to programmatically set, modify, and delete the tax\n registration number (TRN), associated business legal name, and address (Collectively referred\n to as \"TRN information\"). You can also programmatically view TRN information and tax addresses\n (\"Tax profiles\").

\n

You can use this API to automate your TRN information settings instead of manually using\n the console.

\n

Service Endpoint

\n
    \n
  • \n

    https://tax.us-east-1.amazonaws.com

    \n
  • \n
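For illustration, a rough Soto sketch of calling this partitional endpoint; the listTaxRegistrations operation and its empty request are assumptions based on the Tax Settings API surface, and the AWSClient is assumed to exist:

import SotoTaxSettings

// Sketch: the endpoint resolves per partition (for example https://tax.us-east-1.amazonaws.com
// in the commercial partition), so only a partition-appropriate client is needed.
func listTrnInformation(client: AWSClient) async throws {
    let taxSettings = TaxSettings(client: client)
    _ = try await taxSettings.listTaxRegistrations(.init())  // operation and shape names assumed
}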
", "smithy.api#title": "Tax Settings", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -2311,6 +2308,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -2462,18 +2465,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://tax-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://tax-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -2496,6 +2500,15 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ @@ -2520,18 +2533,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://tax-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://tax-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -2546,6 +2560,15 @@ }, { "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, { "fn": "booleanEquals", "argv": [ @@ -2578,18 +2601,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://tax.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://tax.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -2604,18 +2628,19 @@ }, { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://tax.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://tax.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -2635,10 +2660,51 @@ }, "smithy.rules#endpointTests": { "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For 
custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://tax-fips.us-east-1.api.aws" } }, @@ -2652,6 +2718,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://tax-fips.us-east-1.amazonaws.com" } }, @@ -2665,6 +2739,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://tax.us-east-1.api.aws" } }, @@ -2678,6 +2760,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://tax.us-east-1.amazonaws.com" } }, @@ -2688,105 +2778,169 @@ } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://tax-fips.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://tax-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://tax.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://tax.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://tax.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://tax-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://tax-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://tax.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://tax.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://tax.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -2806,6 +2960,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://tax-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -2830,6 +2992,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://tax.us-iso-east-1.c2s.ic.gov" } }, @@ -2854,6 +3024,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://tax-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -2878,6 +3056,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + 
"signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://tax.us-isob-east-1.sc2s.sgov.gov" } }, @@ -2888,54 +3074,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://tax-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://tax.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://tax-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": 
"https://tax.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/models/timestream-query.json b/models/timestream-query.json index 720ffb3560..e5c96b6106 100644 --- a/models/timestream-query.json +++ b/models/timestream-query.json @@ -3238,7 +3238,7 @@ "QueryPricingModel": { "target": "com.amazonaws.timestreamquery#QueryPricingModel", "traits": { - "smithy.api#documentation": "

The pricing model for queries in an account.

" + "smithy.api#documentation": "

The pricing model for queries in an account.

\n \n

The QueryPricingModel parameter is used by several Timestream operations; however, the UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS.
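A hedged Soto sketch of the only accepted value, per the note above (service construction and the enum case name follow the usual generation; endpoint discovery is assumed to be handled by the generated client):

import SotoTimestreamQuery

// Sketch: UpdateAccountSettings accepts only COMPUTE_UNITS for the pricing model.
func switchToComputeUnits(client: AWSClient) async throws {
    let timestreamQuery = TimestreamQuery(client: client, region: .uswest2)
    _ = try await timestreamQuery.updateAccountSettings(.init(queryPricingModel: .computeUnits))
}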

\n
" } } }, diff --git a/models/workspaces-thin-client.json b/models/workspaces-thin-client.json index 20322a9b47..aa00d2b127 100644 --- a/models/workspaces-thin-client.json +++ b/models/workspaces-thin-client.json @@ -996,7 +996,7 @@ "deviceCreationTags": { "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagsMap", "traits": { - "smithy.api#documentation": "

\"The tag keys and optional values for the newly created devices for this environment.\"

" + "smithy.api#documentation": "

The tag keys and optional values for the newly created devices for this environment.

" } } },